git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'x86/fpu'
author Ingo Molnar <mingo@kernel.org>
Tue, 9 Feb 2016 16:03:06 +0000 (17:03 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 9 Feb 2016 16:03:06 +0000 (17:03 +0100)
1  2 
arch/x86/include/asm/fpu/internal.h
arch/x86/kernel/traps.c

index c2e46eb96b6d8449c66482005dcad47ccf88e7f4,a1f78a9fbf41808b131e2f05427867d80b8b66af..a2124343edf5448fa06eece0015729505cf93412
@@@ -17,7 -17,6 +17,7 @@@
  #include <asm/user.h>
  #include <asm/fpu/api.h>
  #include <asm/fpu/xstate.h>
 +#include <asm/cpufeature.h>
  
  /*
   * High level FPU state handling functions:
@@@ -59,22 -58,22 +59,22 @@@ extern u64 fpu__get_supported_xfeatures
   */
  static __always_inline __pure bool use_eager_fpu(void)
  {
 -      return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
 +      return static_cpu_has(X86_FEATURE_EAGER_FPU);
  }
  
  static __always_inline __pure bool use_xsaveopt(void)
  {
 -      return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
 +      return static_cpu_has(X86_FEATURE_XSAVEOPT);
  }
  
  static __always_inline __pure bool use_xsave(void)
  {
 -      return static_cpu_has_safe(X86_FEATURE_XSAVE);
 +      return static_cpu_has(X86_FEATURE_XSAVE);
  }
  
  static __always_inline __pure bool use_fxsr(void)
  {
 -      return static_cpu_has_safe(X86_FEATURE_FXSR);
 +      return static_cpu_has(X86_FEATURE_FXSR);
  }
  
  /*
@@@ -301,7 -300,7 +301,7 @@@ static inline void copy_xregs_to_kernel
  
        WARN_ON(system_state != SYSTEM_BOOTING);
  
 -      if (static_cpu_has_safe(X86_FEATURE_XSAVES))
 +      if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@@ -323,7 -322,7 +323,7 @@@ static inline void copy_kernel_to_xregs
  
        WARN_ON(system_state != SYSTEM_BOOTING);
  
 -      if (static_cpu_has_safe(X86_FEATURE_XSAVES))
 +      if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@@ -461,7 -460,7 +461,7 @@@ static inline void copy_kernel_to_fpreg
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
 -      if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
 +      if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
@@@ -590,7 -589,8 +590,8 @@@ switch_fpu_prepare(struct fpu *old_fpu
         * If the task has used the math, pre-load the FPU on xsave processors
         * or if the past 5 consecutive context-switches used math.
         */
-       fpu.preload = new_fpu->fpstate_active &&
+       fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+                     new_fpu->fpstate_active &&
                      (use_eager_fpu() || new_fpu->counter > 5);
  
        if (old_fpu->fpregs_active) {
diff --combined arch/x86/kernel/traps.c
index 410e8e2700c5813c99975125ce3906efc0f828a3,36a9c017540e408bea31b6d80dd077d9ccbfd973..1e630d1b7ad9c1ce08be101aaa3e14d68951906a
@@@ -83,16 -83,30 +83,16 @@@ gate_desc idt_table[NR_VECTORS] __page_
  DECLARE_BITMAP(used_vectors, NR_VECTORS);
  EXPORT_SYMBOL_GPL(used_vectors);
  
 -static inline void conditional_sti(struct pt_regs *regs)
 +static inline void cond_local_irq_enable(struct pt_regs *regs)
  {
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
  }
  
 -static inline void preempt_conditional_sti(struct pt_regs *regs)
 -{
 -      preempt_count_inc();
 -      if (regs->flags & X86_EFLAGS_IF)
 -              local_irq_enable();
 -}
 -
 -static inline void conditional_cli(struct pt_regs *regs)
 -{
 -      if (regs->flags & X86_EFLAGS_IF)
 -              local_irq_disable();
 -}
 -
 -static inline void preempt_conditional_cli(struct pt_regs *regs)
 +static inline void cond_local_irq_disable(struct pt_regs *regs)
  {
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
 -      preempt_count_dec();
  }
  
  void ist_enter(struct pt_regs *regs)
@@@ -272,7 -286,7 +272,7 @@@ static void do_error_trap(struct pt_reg
  
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
 -              conditional_sti(regs);
 +              cond_local_irq_enable(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }
@@@ -354,7 -368,7 +354,7 @@@ dotraplinkage void do_bounds(struct pt_
        if (notify_die(DIE_TRAP, "bounds", regs, error_code,
                        X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                return;
 -      conditional_sti(regs);
 +      cond_local_irq_enable(regs);
  
        if (!user_mode(regs))
                die("bounds", regs, error_code);
@@@ -429,7 -443,7 +429,7 @@@ do_general_protection(struct pt_regs *r
        struct task_struct *tsk;
  
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 -      conditional_sti(regs);
 +      cond_local_irq_enable(regs);
  
        if (v8086_mode(regs)) {
                local_irq_enable();
@@@ -503,11 -517,9 +503,11 @@@ dotraplinkage void notrace do_int3(stru
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
 -      preempt_conditional_sti(regs);
 +      preempt_disable();
 +      cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 -      preempt_conditional_cli(regs);
 +      cond_local_irq_disable(regs);
 +      preempt_enable_no_resched();
        debug_stack_usage_dec();
  exit:
        ist_exit(regs);
@@@ -636,14 -648,12 +636,14 @@@ dotraplinkage void do_debug(struct pt_r
        debug_stack_usage_inc();
  
        /* It's safe to allow irq's after DR6 has been saved */
 -      preempt_conditional_sti(regs);
 +      preempt_disable();
 +      cond_local_irq_enable(regs);
  
        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                        X86_TRAP_DB);
 -              preempt_conditional_cli(regs);
 +              cond_local_irq_disable(regs);
 +              preempt_enable_no_resched();
                debug_stack_usage_dec();
                goto exit;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
 -      preempt_conditional_cli(regs);
 +      cond_local_irq_disable(regs);
 +      preempt_enable_no_resched();
        debug_stack_usage_dec();
  
  exit:
@@@ -687,7 -696,7 +687,7 @@@ static void math_error(struct pt_regs *
  
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
 -      conditional_sti(regs);
 +      cond_local_irq_enable(regs);
  
        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
@@@ -734,20 -743,19 +734,19 @@@ do_simd_coprocessor_error(struct pt_reg
  dotraplinkage void
  do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
  {
 -      conditional_sti(regs);
 +      cond_local_irq_enable(regs);
  }
  
  dotraplinkage void
  do_device_not_available(struct pt_regs *regs, long error_code)
  {
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-       BUG_ON(use_eager_fpu());
  
  #ifdef CONFIG_MATH_EMULATION
-       if (read_cr0() & X86_CR0_EM) {
+       if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
                struct math_emu_info info = { };
  
 -              conditional_sti(regs);
 +              cond_local_irq_enable(regs);
  
                info.regs = regs;
                math_emulate(&info);
  #endif
        fpu__restore(&current->thread.fpu); /* interrupts still off */
  #ifdef CONFIG_X86_32
 -      conditional_sti(regs);
 +      cond_local_irq_enable(regs);
  #endif
  }
  NOKPROBE_SYMBOL(do_device_not_available);