/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>
/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;
/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		__cpu_invalidate_fpregs_state();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
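
/*
 * Illustrative usage sketch (not part of this file): code that wants to use
 * SIMD instructions from a context that might be an interrupt should check
 * irq_fpu_usable() first and bracket the FPU-using code with
 * kernel_fpu_begin()/kernel_fpu_end(). The helpers my_sum_simd() and
 * my_sum_scalar() below are hypothetical stand-ins for a driver's own code:
 *
 *	static void my_sum(const void *buf, size_t len)
 *	{
 *		if (irq_fpu_usable()) {
 *			kernel_fpu_begin();
 *			my_sum_simd(buf, len);		// may clobber XMM/YMM regs
 *			kernel_fpu_end();
 *		} else {
 *			my_sum_scalar(buf, len);	// integer-only fallback
 *		}
 *	}
 */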
/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			copy_kernel_to_fpregs(&fpu->state);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);
/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	/*
	 * XRSTORS requires that this bit is set in xcomp_bv, or
	 * it will #GP. Make sure it is replaced after the memset().
	 */
	if (static_cpu_has(X86_FEATURE_XSAVES))
		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);
	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		copy_kernel_to_fpregs(&src_fpu->state);
	}
	preempt_enable();
	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}
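
/*
 * Illustrative call path (sketch): on fork(), arch_dup_task_struct() hands the
 * parent and child FPU contexts to fpu__copy(), roughly:
 *
 *	fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 *
 * so the child starts with its own private copy of the parent's FPU state.
 */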
/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}
/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the new state.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		__fpu_invalidate_fpregs_state(fpu);
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}
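
/*
 * Illustrative caller sketch (hypothetical helper, not the actual regset
 * code): a ptrace-style writer that fills in a stopped child's FPU context
 * would do something along these lines before touching the buffer:
 *
 *	static int example_set_fpstate(struct task_struct *child, const void *buf)
 *	{
 *		struct fpu *fpu = &child->thread.fpu;
 *
 *		fpu__activate_fpstate_write(fpu);	// unlazy or init the fpstate
 *		memcpy(&fpu->state.fxsave, buf, sizeof(fpu->state.fxsave));
 *		return 0;
 *	}
 */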
/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'. Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs in to the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'. Ensure that no
	 * CPU thinks that its fpregs match the fpstate. This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	__fpu_invalidate_fpregs_state(fpu);
}
/*
 * This function must be paired with fpu__current_fpstate_write_begin()
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date. Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary. Context switches can happen again.
	 */
	preempt_enable();
}
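
/*
 * Illustrative pairing sketch (hypothetical caller): code that patches the
 * current task's in-memory xstate brackets the modification like this:
 *
 *	fpu__current_fpstate_write_begin();
 *	// ... modify current->thread.fpu.state here ...
 *	fpu__current_fpstate_write_end();
 *
 * The begin/end pair keeps preemption disabled across the update and reloads
 * the registers from the modified fpstate afterwards if they were active.
 */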
/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}
/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}
/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
	if (static_cpu_has(X86_FEATURE_FPU)) {
		fpu__activate_curr(fpu);
		user_fpu_begin();
		copy_init_fpstate_to_fpregs();
	}
}
/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}
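
	/*
	 * Worked example (illustrative): MXCSR == 0x1d84 means the divide-by-zero
	 * mask bit (0x200) is clear and the DZ flag (0x004) is set, so
	 * err = ~(0x1d84 >> 7) & 0x1d84 keeps bit 0x004 and the checks below
	 * return FPE_FLTDIV.
	 */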
	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}
	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}