/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
        if (!use_xsaveopt())
                return;
        __sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        int err;

        /* See comment in fpu_fxsave() below. */
        asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : [fx] "R" (fx), "m" (*fx), "0" (0));
        return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        int err;

        /*
         * Clear the bytes not touched by the fxsave and reserved
         * for the SW usage.
         */
        err = __clear_user(&fx->sw_reserved,
                           sizeof(struct _fpx_sw_bytes));
        if (unlikely(err))
                return -EFAULT;

        /* See comment in fpu_fxsave() below. */
        asm volatile("1:  rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
                     : [fx] "R" (fx), "0" (0));
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (fpu->state->fxsave));
#else
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
           needed for addressing (fix submitted to mainline 2005-11-21).
                asm volatile("rex64/fxsave %0"
                             : "=m" (fpu->state->fxsave));
           This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
        asm volatile("rex64/fxsave (%[fx])"
                     : "=m" (fpu->state->fxsave)
                     : [fx] "R" (&fpu->state->fxsave));
#endif
}
#else  /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (*fx));

        return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        asm volatile("fxsave %[fx]"
                     : [fx] "=m" (fpu->state->fxsave));
}

#endif  /* CONFIG_X86_64 */

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
        if (use_xsave()) {
                fpu_xsave(fpu);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
                        return;
        } else if (use_fxsr()) {
                fpu_fxsave(fpu);
        } else {
                asm volatile("fsave %[fx]; fwait"
                             : [fx] "=m" (fpu->state->fsave));
                return;
        }

        if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
                asm volatile("fnclex");

        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending.  Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
                ASM_NOP8 ASM_NOP2,
                "emms\n\t"              /* clear stack tags */
                "fildl %P[addr]",       /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        fpu_save_init(&tsk->thread.fpu);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
        return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
        if (use_xsave())
                return fpu_xrstor_checking(fpu);
        else
                return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
        return fpu_restore_checking(&tsk->thread.fpu);
}
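
/*
 * Illustrative sketch, not part of this header's API: the device-not-
 * available trap path is the typical caller. Modelled on
 * __math_state_restore(), restoring on first FPU use looks roughly like:
 *
 *	if (unlikely(restore_fpu_checking(tsk))) {
 *		stts();
 *		force_sig(SIGSEGV, tsk);
 *		return;
 *	}
 *	task_thread_info(tsk)->status |= TS_USEDFPU;
 */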

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();

        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}

static inline bool irq_fpu_usable(void)
{
        struct pt_regs *regs;

        return !in_interrupt() || !(regs = get_irq_regs()) ||
                user_mode(regs) || (read_cr0() & X86_CR0_TS);
}
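
/*
 * Illustrative sketch, assuming a caller that wants to use SSE/FPU
 * instructions in kernel mode: bracket them with kernel_fpu_begin()/
 * kernel_fpu_end(), and from interrupt context only proceed when
 * irq_fpu_usable() says it is safe:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/FPU instructions ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer fallback ...
 *	}
 */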

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions
 * get used from interrupt context as well. To prevent these kernel
 * instructions in interrupt context interacting wrongly with other
 * user/kernel FPU usage, they should only be used within an
 * irq_ts_save()/irq_ts_restore() pair.
 */
static inline int irq_ts_save(void)
{
        /*
         * If in process context and not atomic, we can take a spurious
         * DNA fault. Otherwise, doing clts() in process context requires
         * disabling preemption or some heavy lifting like
         * kernel_fpu_begin().
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}

static inline void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
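
/*
 * Illustrative sketch (the VIA padlock driver is the intended user of
 * this pattern): wrap each such instruction in a save/restore pair so
 * a spurious DNA fault cannot fire while TS is set:
 *
 *	int ts_state = irq_ts_save();
 *	... issue the padlock instruction ...
 *	irq_ts_restore(ts_state);
 */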

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.fpu.state->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}
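
/*
 * Illustrative sketch, modelled on the x87 trap handling; "task" here is
 * whatever task_struct is being inspected. Bits set in the status word
 * but masked in the control word are the unmasked, pending exceptions:
 *
 *	unsigned short cwd = get_fpu_cwd(task);
 *	unsigned short swd = get_fpu_swd(task);
 *	unsigned short unmasked = swd & ~cwd;
 */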

static inline bool fpu_allocated(struct fpu *fpu)
{
        return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
        if (fpu_allocated(fpu))
                return 0;
        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
        if (!fpu->state)
                return -ENOMEM;
        WARN_ON((unsigned long)fpu->state & 15);
        return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
        if (fpu->state) {
                kmem_cache_free(task_xstate_cachep, fpu->state);
                fpu->state = NULL;
        }
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
        memcpy(dst->state, src->state, xstate_size);
}
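
/*
 * Illustrative sketch, modelled on the fork path: duplicating a task's
 * FPU state means allocating the destination buffer first, then copying;
 * "dst" and "src" stand for the two task_structs involved:
 *
 *	int err = fpu_alloc(&dst->thread.fpu);
 *	if (err)
 *		return err;
 *	fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 */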

extern void fpu_finit(struct fpu *fpu);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */