git.karo-electronics.de Git - linux-beck.git/commitdiff
powerpc: Move part of giveup_vsx into c
author Anton Blanchard <anton@samba.org>
Thu, 29 Oct 2015 00:44:02 +0000 (11:44 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 1 Dec 2015 02:52:25 +0000 (13:52 +1100)
Move the MSR modification into c. Removing it from the assembly
function will allow us to avoid costly MSR writes by batching them
up.

Check the FP and VMX bits before calling the relevant giveup_*()
function. This makes giveup_vsx() and flush_vsx_to_thread() perform
more like their sister functions, and allows us to use
flush_vsx_to_thread() in the signal code.

Move the check_if_tm_restore_required() check in.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/process.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/vector.S

index 6bcf82bed6107d660e1a0915c86b3a39894531fa..0cb627662ded11ab23a096238e65a77037c9e3b0 100644 (file)
@@ -205,6 +205,25 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
+void giveup_vsx(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | (MSR_FP|MSR_VEC|MSR_VSX);
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       if (tsk->thread.regs->msr & MSR_FP)
+               __giveup_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               __giveup_altivec(tsk);
+       __giveup_vsx(tsk);
+}
+EXPORT_SYMBOL(giveup_vsx);
+
 void enable_kernel_vsx(void)
 {
        WARN_ON(preemptible());
@@ -220,15 +239,6 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
-{
-       check_if_tm_restore_required(tsk);
-       giveup_fpu(tsk);
-       giveup_altivec(tsk);
-       __giveup_vsx(tsk);
-}
-EXPORT_SYMBOL(giveup_vsx);
-
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
index 3cd7a32c8ff4f93e4b23230514e8e78e62691485..4022cbb7e2d6c7ce46d074384b8227d673885cb7 100644 (file)
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
         * contains valid data
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
index 6f2b555516e6c1f8e441e984a36184eeaf590d6c..3b2339912911684a7e0f1d497370d398d9a49683 100644 (file)
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
         * VMX data.
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                err |= copy_vsx_to_user(v_regs, current);
                /* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
         * VMX data.
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
 
index 6e925b40a484ebf034fad75eb4848f72adf99a4f..98675b08efe2b9eb2eba2a3fd3526b85f355eba9 100644 (file)
@@ -177,14 +177,8 @@ _GLOBAL(load_up_vsx)
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-       mfmsr   r5
-       oris    r5,r5,MSR_VSX@h
-       mtmsrd  r5                      /* enable use of VSX now */
-       isync
-
        addi    r3,r3,THREAD            /* want THREAD of task */
        ld      r5,PT_REGS(r3)
        cmpdi   0,r5,0