MIPS: disable preemption whilst initialising MSA
author     Paul Burton <paul.burton@imgtec.com>
           Fri, 11 Jul 2014 15:44:35 +0000 (16:44 +0100)
committer  Ralf Baechle <ralf@linux-mips.org>
           Fri, 1 Aug 2014 22:06:44 +0000 (00:06 +0200)
Preemption must be disabled throughout the process of enabling the FPU,
enabling MSA & initialising the vector registers. Without doing so, it
is possible to lose the FPU or MSA whilst initialising them, causing
that initialisation to fail.
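
The pattern being enforced looks roughly like the sketch below. It is an
illustration only, not part of the patch: the wrapper name
init_fp_msa_sketch() is made up, while init_fpu(), enable_msa(),
_init_msa_upper() and the TIF_* flag updates are the helpers touched by
the diff that follows.

/*
 * Sketch: the whole own-FPU / enable-MSA / init-vector-registers
 * sequence runs inside a single preempt_disable()/preempt_enable()
 * pair, so a context switch cannot take the FPU or MSA unit away
 * part-way through and leave the initialisation operating on hardware
 * the task no longer owns.
 */
static int init_fp_msa_sketch(int msa)
{
	int err;

	preempt_disable();		/* hold on to the FPU/MSA unit */

	err = init_fpu();		/* acquire and initialise the FPU */
	if (msa && !err) {
		enable_msa();		/* turn the MSA unit on ... */
		_init_msa_upper();	/* ... and initialise the upper vector halves */
		set_thread_flag(TIF_USEDMSA);
		set_thread_flag(TIF_MSA_CTX_LIVE);
	}

	preempt_enable();		/* safe to be preempted again */

	return err;
}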

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/7307/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/fpu.h
arch/mips/kernel/traps.c

diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 71d97ebd9090d68cb503ce4e7a1d08486c7b0bab..4d0aeda6839741ea27f956e9049958cdab297b56 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -164,8 +164,6 @@ static inline int init_fpu(void)
 {
        int ret = 0;
 
-       preempt_disable();
-
        if (cpu_has_fpu) {
                ret = __own_fpu();
                if (!ret)
@@ -173,8 +171,6 @@ static inline int init_fpu(void)
        } else
                fpu_emulator_init_fpu();
 
-       preempt_enable();
-
        return ret;
 }
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 4716b89543a9f87984f7e52891d76ca07d86cada..22b19c2750447418d5133df5d2d01f40e4b5ca85 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1093,6 +1093,7 @@ static int enable_restore_fp_context(int msa)
 
        if (!used_math()) {
                /* First time FP context user. */
+               preempt_disable();
                err = init_fpu();
                if (msa && !err) {
                        enable_msa();
@@ -1100,6 +1101,7 @@ static int enable_restore_fp_context(int msa)
                        set_thread_flag(TIF_USEDMSA);
                        set_thread_flag(TIF_MSA_CTX_LIVE);
                }
+               preempt_enable();
                if (!err)
                        set_used_math();
                return err;
@@ -1139,10 +1141,11 @@ static int enable_restore_fp_context(int msa)
         * This task is using or has previously used MSA. Thus we require
         * that Status.FR == 1.
         */
+       preempt_disable();
        was_fpu_owner = is_fpu_owner();
-       err = own_fpu(0);
+       err = own_fpu_inatomic(0);
        if (err)
-               return err;
+               goto out;
 
        enable_msa();
        write_msa_csr(current->thread.fpu.msacsr);
@@ -1158,7 +1161,8 @@ static int enable_restore_fp_context(int msa)
        prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
        if (!prior_msa && was_fpu_owner) {
                _init_msa_upper();
-               return 0;
+
+               goto out;
        }
 
        if (!prior_msa) {
@@ -1182,6 +1186,10 @@ static int enable_restore_fp_context(int msa)
                if (!was_fpu_owner)
                        asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
        }
+
+out:
+       preempt_enable();
+
        return 0;
 }
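
Why own_fpu_inatomic(0) rather than own_fpu(0) in the hunk above:
own_fpu() manages preemption itself, whereas the _inatomic variant
expects the caller to have already disabled it, which
enable_restore_fp_context() now does for the whole sequence. The sketch
below shows that relationship as read from arch/mips/include/asm/fpu.h;
it is a simplified illustration of the split, not part of this patch.

/*
 * Simplified illustration (not part of this patch): the plain variant
 * brackets the in-atomic one with its own preemption toggling, so
 * callers that already run with preemption disabled, like the rewritten
 * enable_restore_fp_context(), use the in-atomic variant directly.
 */
static inline int own_fpu(int restore)
{
	int ret;

	preempt_disable();
	ret = own_fpu_inatomic(restore);	/* actually claim/restore the FPU */
	preempt_enable();

	return ret;
}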