#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}
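
/*
 * For reference: on a toolchain whose assembler knows the INVPCID
 * mnemonic, the .byte sequence above should be equivalent to
 * something like (untested sketch, same operands as above):
 *
 *	asm volatile("invpcid %0, %1"
 *		     : : "m" (desc), "r" (type) : "memory");
 *
 * The raw opcode is kept so the file builds with old binutils.
 */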

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
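
/*
 * Usage sketch (illustrative, not a call site in this file): dropping
 * every non-global translation tagged with PCID 1 would look like:
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_single_context(1);
 *
 * The feature check is the caller's job: executing INVPCID on a CPU
 * without X86_FEATURE_INVPCID raises #UD.
 */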

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}
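
/*
 * Example (illustrative): code that enables SMEP on this CPU updates
 * the shadow and the hardware register together via:
 *
 *	cr4_set_bits(X86_CR4_SMEP);
 *
 * Per the rule documented on struct tlb_state, callers must keep
 * interrupts from modifying CR4 concurrently.
 */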

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
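
/*
 * Illustrative use from early boot (sketch; the real call sites live
 * in arch setup code): enabling PSE on the boot CPU while recording
 * the bit for CPUs brought up later:
 *
 *	if (boot_cpu_has(X86_FEATURE_PSE))
 *		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 */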

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
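
/*
 * Unlike the CR3 reload in __native_flush_tlb(), INVLPG also drops
 * global entries for the targeted page, so it works for kernel
 * addresses too. A minimal sketch, assuming a mapped address addr:
 *
 *	__native_flush_tlb_single((unsigned long)addr);
 *
 * Normal callers go through __flush_tlb_one() so the flush is
 * accounted in the vm statistics.
 */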

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
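
/*
 * Typical usage (illustrative sketch): after rewriting a user PTE,
 * flush the one affected address everywhere it may be cached:
 *
 *	set_pte_at(vma->vm_mm, address, ptep, new_pte);
 *	flush_tlb_page(vma, address);
 *
 * Range updates would use flush_tlb_range(vma, start, end); changes
 * to kernel mappings use flush_tlb_kernel_range(start, end).
 */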

#ifndef CONFIG_SMP

/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else	/* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long end);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
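
/*
 * Rough sketch (not the actual arch/x86/mm/tlb.c logic) of how the
 * flush IPI path consults this state: a CPU that is only lazily using
 * the target mm may skip the flush and detach from the mm instead:
 *
 *	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
 *		leave_mm(smp_processor_id());
 */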

#endif	/* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */