diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 25e84c0e116695e37d416296e34eb6f8cfc28ba6..cc8fb474d5204ed3c2cf2c48bd754b1fdf3758c9 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -275,7 +275,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
        . = 0xf00
        b       performance_monitor_pSeries
 
-       STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
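+       /*
+        * Each vector slot here is only 0x20 bytes, too small for the
+        * full STD_EXCEPTION_PSERIES prologue, so install one-instruction
+        * trampolines and emit the real prologues below ("moved from
+        * 0xf00").
+        */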
+       . = 0xf20
+       b       altivec_unavailable_pSeries
+
+       . = 0xf40
+       b       vsx_unavailable_pSeries
 
 #ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
@@ -295,6 +299,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 
        /* moved from 0xf00 */
        STD_EXCEPTION_PSERIES(., performance_monitor)
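+       /*
+        * A "." vector argument makes STD_EXCEPTION_PSERIES emit the
+        * handler at the current location; the trampolines at 0xf20
+        * and 0xf40 above branch to these labels.
+        */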
+       STD_EXCEPTION_PSERIES(., altivec_unavailable)
+       STD_EXCEPTION_PSERIES(., vsx_unavailable)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -739,7 +745,8 @@ fp_unavailable_common:
        ENABLE_INTS
        bl      .kernel_fp_unavailable_exception
        BUG_OPCODE
-1:     b       .load_up_fpu
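+       /*
+        * load_up_fpu now returns via blr rather than branching to
+        * fast_exception_return itself, so load_up_vsx below can reuse
+        * it as a subroutine.
+        */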
+1:     bl      .load_up_fpu
+       b       fast_exception_return
 
        .align  7
        .globl altivec_unavailable_common
@@ -747,7 +754,10 @@ altivec_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-       bne     .load_up_altivec        /* if from user, just load it up */
+       beq     1f
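+       /*
+        * Likewise, load_up_altivec now returns via blr so load_up_vsx
+        * can call it; take the fast exception return path here.
+        */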
+       bl      .load_up_altivec
+       b       fast_exception_return
+1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
        bl      .save_nvgprs
@@ -827,9 +837,70 @@ _STATIC(load_up_altivec)
        std     r4,0(r3)
 #endif /* CONFIG_SMP */
        /* restore registers and return */
-       b       fast_exception_return
+       blr                             /* return to caller, which handles the exception exit */
 #endif /* CONFIG_ALTIVEC */
 
+       .align  7
+       .globl vsx_unavailable_common
+vsx_unavailable_common:
+       EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+       bne     .load_up_vsx            /* if from user, just load it up */
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+       bl      .save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       ENABLE_INTS
+       bl      .vsx_unavailable_exception
+       b       .ret_from_except
+
+#ifdef CONFIG_VSX
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse load_up_fpu and load_up_altivec, but first check MSR to see
+ * whether the FP and VMX state have already been loaded.
+ * On entry: r13 == 'current' && last_task_used_vsx != 'current'
+ */
+_STATIC(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+       andi.   r5,r12,MSR_FP           /* eq iff MSR_FP is clear */
+       beql+   load_up_fpu             /* call unless FP already loaded */
+       andis.  r5,r12,MSR_VEC@h        /* eq iff MSR_VEC is clear */
+       beql+   load_up_altivec         /* call unless VMX already loaded */
+
+#ifndef CONFIG_SMP
+       ld      r3,last_task_used_vsx@got(r2)
+       ld      r4,0(r3)
+       cmpdi   0,r4,0
+       beq     1f
+       /* Disable VSX for last_task_used_vsx */
+       addi    r4,r4,THREAD
+       ld      r5,PT_REGS(r4)
+       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+       lis     r6,MSR_VSX@h
+       andc    r6,r4,r6
+       std     r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+       ld      r4,PACACURRENT(r13)
+       addi    r4,r4,THREAD            /* Get THREAD */
+       li      r6,1
+       stw     r6,THREAD_USED_VSR(r4)  /* set thread_struct.used_vsr */
+       /* enable use of VSX after return */
+       oris    r12,r12,MSR_VSX@h
+       std     r12,_MSR(r1)
+#ifndef CONFIG_SMP
+       /* Update last_task_used_vsx to 'current' */
+       ld      r4,PACACURRENT(r13)
+       std     r4,0(r3)
+#endif /* CONFIG_SMP */
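+       /*
+        * load_up_vsx is only ever branched to (never called with bl),
+        * so exit straight through fast_exception_return.
+        */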
+       b       fast_exception_return
+#endif /* CONFIG_VSX */
+
 /*
  * Hash table stuff
  */
@@ -1127,7 +1198,6 @@ _GLOBAL(generic_secondary_smp_init)
 3:     HMT_LOW
        lbz     r23,PACAPROCSTART(r13)  /* Test if this processor should */
                                        /* start.                        */
-       sync
 
 #ifndef CONFIG_SMP
        b       3b                      /* Never go on non-SMP           */
@@ -1135,6 +1205,8 @@ _GLOBAL(generic_secondary_smp_init)
        cmpwi   0,r23,0
        beq     3b                      /* Loop until told to go         */
 
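+       /*
+        * The sync moved from before the loop to after it: it keeps the
+        * reads of cur_cpu_spec below from being reordered before the
+        * read of paca->cpu_start that saw it set.
+        */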
+       sync                            /* order paca.run and cur_cpu_spec */
+
        /* See if we need to call a cpu state restore handler */
        LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
        ld      r23,0(r23)