#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	/* Allow user-space RDPMC only if perf says this mm may use it. */
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
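
/*
 * Caller-side sketch (hedged: the real call site is the context-switch
 * path in arch/x86/mm/tlb.c, and its exact shape may differ):
 *
 *	switch_mm_irqs_off(prev, next, tsk) {
 *		...
 *		load_cr3(next->pgd);
 *		load_mm_cr4(next);	// CR4.PCE tracks the incoming mm
 *		load_mm_ldt(next);
 *		...
 *	}
 */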

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live: so we can use the data in them without holding
 * a lock of any kind.
 *
 * Xen requires page-aligned LDTs with special permissions.  This is
 * needed to prevent us from installing evil descriptors such as
 * call gates.  On native, we could merge the ldt_struct and LDT
 * allocations, but it's not worth trying to optimize.
 */
struct ldt_struct {
	struct desc_struct *entries;
	unsigned int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */
	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
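
/*
 * Writer-side sketch of the protocol described above (hedged:
 * install_ldt() lives in arch/x86/kernel/ldt.c; this is only its
 * shape, not its exact code):
 *
 *	// Publish the new table; pairs with lockless_dereference()
 *	// in load_mm_ldt().
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *
 *	// IPI every CPU running this mm so each reloads the LDT
 *	// before the old table can be freed.
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 */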

/*
 * Mark this CPU as lazily holding the mm: a kernel thread keeps the
 * outgoing CR3 loaded, and remote TLB flushes may then skip this CPU
 * after a final shootdown.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(tsk, mm);
	return 0;
}
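
/*
 * How the bitmap seeded above is consumed (a hedged sketch of
 * mm_pkey_alloc() from asm/pkeys.h, not its exact code):
 *
 *	int pkey = ffz(mm->context.pkey_allocation_map);
 *	if (pkey >= arch_max_pkey())
 *		return -1;	// all keys in use
 *	mm->context.pkey_allocation_map |= (1 << pkey);
 *	return pkey;
 *
 * Seeding the map with 0x1 keeps pkey 0 permanently reserved.
 */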

static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
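
/*
 * activate_mm() runs on the exec path when a fresh mm replaces the old
 * one; a hedged sketch of that caller (exec_mmap() in fs/exec.c):
 *
 *	task_lock(tsk);
 *	active_mm = tsk->active_mm;
 *	tsk->mm = tsk->active_mm = new_mm;
 *	activate_mm(active_mm, new_mm);
 *	task_unlock(tsk);
 *
 * deactivate_mm() is reached from mm_release() on exit/exec and only
 * has to clear the user segment bases.
 */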

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
	       mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif
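
/*
 * Worked example: the protection key lives in four adjacent vm_flags
 * bits, so a VMA carrying VM_PKEY_BIT2 | VM_PKEY_BIT0 stores
 * (0x5 << VM_PKEY_SHIFT) and vma_pkey() returns 5.  (Illustrative
 * values only.)
 */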

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}
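
/*
 * PKRU holds two bits per key: AD (access disable) at bit 2*pkey and
 * WD (write disable) at bit 2*pkey + 1.  Worked example: with
 * PKRU == 0x0000000c, pkey 1 has AD=1 and WD=1, so both
 * __pkru_allows_pkey(1, false) and __pkru_allows_pkey(1, true) fail,
 * while pkey 0 remains fully accessible.
 */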

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}
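
/*
 * This is reached through the generic arch_vma_access_permitted() hook
 * from the fault and get_user_pages() paths; "foreign" is set when the
 * access is on behalf of another task (e.g. get_user_pages_remote()),
 * where our own PKRU is meaningless.
 */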

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || !in_atomic());

	VM_BUG_ON(cr3 != read_cr3());
	return cr3;
}
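
/*
 * Usage sketch for a KVM-style caller (hedged, illustrative only):
 *
 *	unsigned long saved_cr3 = __get_current_cr3_fast();
 *	// ... briefly run with a different CR3 ...
 *	write_cr3(saved_cr3);
 *
 * Per the VM_WARN_ON() above, both the save and the restore must
 * happen in atomic, non-NMI context.
 */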

#endif /* _ASM_X86_MMU_CONTEXT_H */