/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

#include <trace/events/thp.h>
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
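/*
 * For reference, the per-cpu batch declared above accumulates up to
 * PPC64_TLB_BATCH_NR pending invalidations. A sketch of the structure,
 * assuming the layout in the hash tlbflush header of this era (only
 * the fields this file relies on are shown):
 *
 *	struct ppc64_tlb_batch {
 *		int			active;
 *		unsigned long		index;
 *		struct mm_struct	*mm;
 *		real_pte_t		pte[PPC64_TLB_BATCH_NR];
 *		unsigned long		vpn[PPC64_TLB_BATCH_NR];
 *		unsigned int		psize;
 *		int			ssize;
 *	};
 */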
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;
	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}
	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);
	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}
	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg, copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
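/*
 * For illustration: batching only happens while batch->active is set,
 * which on this hash flavour is done by the lazy MMU mode hooks. A
 * sketch of the typical calling pattern, assuming the hash variants of
 * arch_enter/leave_lazy_mmu_mode():
 *
 *	arch_enter_lazy_mmu_mode();	(sets batch->active)
 *	...PTE updates, each calling hpte_need_flush()...
 *	arch_leave_lazy_mmu_mode();	(flushes any pending batch and
 *					 clears batch->active)
 */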
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
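/*
 * Note on the local/global choice above: when the mm has only ever run
 * on the current CPU (its cpumask equals just us), the flush can use a
 * CPU-local invalidation (tlbiel) instead of a broadcast tlbie; the
 * "local" argument to flush_hash_page()/flush_hash_range() selects that.
 */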
void hash__tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}
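/*
 * For context: hash__tlb_flush() is the hash side of the generic
 * tlb_flush() hook used by the mmu_gather code. Assuming the book3s64
 * dispatch of this era, the selection looks roughly like:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (radix_enabled())
 *			return radix__tlb_flush(tlb);
 *		return hash__tlb_flush(tlb);
 *	}
 */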
/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
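/*
 * For illustration, the PCI IO-space unmap path is the main user of
 * __flush_hash_table_range(). A sketch of the call, assuming the
 * pcibios_unmap_io_space() style of use (res being the bridge IO
 * resource):
 *
 *	__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
 *				 res->end + _IO_BASE + 1);
 */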
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
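/*
 * For illustration: this is used on the THP collapse path, where the
 * hash entries for the small pages under a PMD must be flushed before
 * the range is remapped with a huge page. A sketch of the caller,
 * assuming the hash__pmdp_collapse_flush() usage of this era:
 *
 *	pmd = *pmdp;
 *	pmd_clear(pmdp);
 *	...
 *	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
 */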