/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>

#include "mmu_decl.h"
#ifdef CONFIG_PPC_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else /* CONFIG_PPC_BOOK3E */
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif
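/*
 * Illustrative sketch only (not from the original file): a consumer of
 * next_tlbcam_idx would round-robin through the TLBCAM slots left over
 * after the bolted linear mapping, roughly like this.  The names
 * num_tlbcam_entries and first_free_tlbcam are assumptions for the
 * example:
 *
 *	int idx = __get_cpu_var(next_tlbcam_idx);
 *	... load the new entry into TLB1 slot idx ...
 *	if (++idx >= num_tlbcam_entries)
 *		idx = first_free_tlbcam;
 *	__get_cpu_var(next_tlbcam_idx) = idx;
 */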
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
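/*
 * Usage sketch (illustrative, not part of the original file): generic mm
 * code invokes the entry points above after changing translations, e.g.:
 *
 *	flush_tlb_page(vma, address);		-- one user page
 *	flush_tlb_kernel_range(start, end);	-- a kernel virtual range
 *
 * On !SMP builds the flush_tlb_* names resolve to the local_* variants
 * below.
 */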
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);
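/*
 * Comment added for clarity: the predicate below is true when every CPU
 * that has touched this mm is a hardware thread of the current core, in
 * which case a core-local invalidation suffices and no IPI or broadcast
 * tlbivax is needed.
 */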
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
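/*
 * Example added for clarity: suppose another CPU steals our context right
 * after we sample the PID.  We may then invalidate a PID that has already
 * been handed to a different mm, but that only discards TLB entries which
 * can always be refetched from the page tables; the worst case is a
 * spurious reload, never a stale translation.
 */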
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now we keep it that way.  A sketch of such an optimization follows
 * this function.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
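/*
 * A minimal sketch of the threshold optimization mentioned above (not
 * part of the original file; the cutoff value is an assumption): flush
 * page by page for small ranges and fall back to a full mm flush for
 * large ones.  Kept under #if 0 so it does not clash with the real
 * flush_tlb_range() above.
 */
#if 0
#define FLUSH_RANGE_THRESHOLD	32	/* pages; assumed tuning value */

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (((end - start) >> PAGE_SHIFT) <= FLUSH_RANGE_THRESHOLD) {
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			flush_tlb_page(vma, addr);
	} else
		flush_tlb_mm(vma->vm_mm);
}
#endif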
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
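/*
 * Worked example for the else branch above (added for clarity, 4K pages,
 * PAGE_SHIFT = 12): each 8-byte PTE maps one 4K page, so the PTE's
 * address in the virtual linear table is address >> (12 - 3), masked
 * down to a PTE-page boundary by ~0xffful and then tagged with the
 * quadrant-1 region id held in rid before the single page flush.
 */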
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto no_indirect;
	}
#endif
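	/* Worked example for the conversion above (added for clarity):
	 * a 4K page has def->shift = 12, so (12 - 10) >> 1 = 1, and
	 * 4^1 KB = 4 KB; a 1M page has shift 20, giving (20 - 10) >> 1 = 5,
	 * and 4^5 KB = 1024 KB.
	 */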
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
		goto no_indirect;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
 no_indirect:
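	/* Worked example for the EPTCFG decode above (added for clarity):
	 * the register packs up to three (sps, ps) pairs in successive
	 * 5-bit fields, each in units of log2(size in KB).  A pair of
	 * sps = 2 and ps = 10 means 4K sub-pages (2^2 KB) under 1M
	 * indirect entries (2^10 KB): MMU_PAGE_1M gains
	 * MMU_PAGE_SIZE_INDIRECT and the 4K def records ind = 10 + 10 = 20.
	 */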
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exceptions vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one
	 */
	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
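/*
 * Usage example (taken from the callers below), redirecting the 0x1c0
 * data TLB miss vector to the HW tablewalk handler:
 *
 *	patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
 */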
static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   one dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);

	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;
	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
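	/* Note added for clarity: 0x4 in the 5-bit WIMGE default field is
	 * the M (memory coherence) bit, so entries loaded on a TLB miss
	 * default to cached, coherent, non-guarded mappings; the default
	 * tsize is or'ed in below.
	 */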
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}
	mtspr(SPRN_MAS4, mas4);
	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}
void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change. We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
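/*
 * Worked example (added for clarity): a 2 GB first memblock is cropped
 * to ppc64_rma_size = 0x40000000 (1 GB, the bolted entry size), while a
 * 256 MB first memblock leaves it at 256 MB.
 */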
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */