#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_true(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
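
/*
 * enter_lazy_tlb() is called when the scheduler switches to a kernel thread
 * that keeps running on the current mm.  Marking the CPU TLBSTATE_LAZY lets
 * the next TLB flush IPI detach it from the mm via leave_mm() and drop it
 * from mm_cpumask(), so no further flush IPIs are sent while the kernel
 * thread runs.
 */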
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                load_cr3(next->pgd);
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register.  This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never free an LDT while the mm still exists.  That
                 * means that next->context.ldt != prev->context.ldt,
                 * because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here.  Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));

                        /*
                         * We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery.  We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}
#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}
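
/*
 * An mm is treated as 64-bit unless IA-32 emulation is configured in and the
 * mm was created by a 32-bit (TIF_IA32) task.  Note that x32 mms count as
 * 64-bit by this check.
 */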
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !config_enabled(CONFIG_IA32_EMULATION) ||
                !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
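
/*
 * Called while a new mm is being set up for exec(); resets the MPX
 * bounds-directory pointer so the fresh mm starts with no bounds directory.
 */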
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */