#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_altivec(struct thread_struct *thread)
 *
 * This is similar to load_up_altivec but for the transactional version of the
 * vector regs.  It doesn't mess with the task MSR or valid flags.
 *
 * Furthermore, VEC laziness is not supported with TM currently.
 */
_GLOBAL(do_load_up_transact_altivec)
	mfmsr	r6
	oris	r5,r6,MSR_VEC@h
	MTMSRD(r5)
	isync

	li	r4,1
	stw	r4,THREAD_USED_VR(r3)
	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	v0,r10,r3
	mtvscr	v0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

	/* Disable VEC again. */
	MTMSRD(r6)
	isync

	blr
#endif
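
/*
 * A hedged C-level sketch of the routine above; the field and helper
 * names (used_vr, transact_vr, load_vr_state) are illustrative, not
 * taken from this file:
 *
 *	void do_load_up_transact_altivec(struct thread_struct *t)
 *	{
 *		unsigned long msr = mfmsr();
 *
 *		mtmsr(msr | MSR_VEC);		// enable VMX temporarily
 *		t->used_vr = 1;
 *		load_vr_state(&t->transact_vr);	// VSCR plus all 32 VRs
 *		mtmsr(msr);			// restore the old MSR
 *	}
 */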

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on the vrsave value to
 * eventually avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f
	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	addi	r7,r4,THREAD_VRSTATE
	SAVE_32VRS(0,r5,r7)
	mfvscr	v0
	li	r10,VRSTATE_VSCR
	stvx	v0,r10,r7
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
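
/*
 * The UP-only block above is the heart of lazy VMX switching: the
 * previous owner's live registers are written back to its thread_struct
 * and MSR_VEC is cleared in its saved MSR, so it will trap again if it
 * touches VMX.  A hedged C sketch (save_vr_state is illustrative):
 *
 *	if (last_task_used_altivec) {
 *		struct thread_struct *t = &last_task_used_altivec->thread;
 *
 *		save_vr_state(&t->vr_state);
 *		t->regs->msr &= ~MSR_VEC;
 *	}
 */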

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
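
/*
 * In C terms the workaround above is just (SPRN_VRSAVE is the real SPR;
 * mfspr/mtspr are the usual accessors):
 *
 *	if (mfspr(SPRN_VRSAVE) == 0)
 *		mtspr(SPRN_VRSAVE, -1);	// treat every VR as live
 */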
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	addi	r7,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r7
	mtvscr	v0
	REST_32VRS(0,r4,r7)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

_GLOBAL(giveup_altivec_notask)
	mfmsr	r3
	andis.	r4,r3,MSR_VEC@h
	bnelr				/* Already enabled? */
	oris	r3,r3,MSR_VEC@h
	SYNC
	MTMSRD(r3)			/* enable use of VMX now */
	isync
	blr
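
/*
 * A minimal C sketch of giveup_altivec_notask's MSR handling (the
 * accessors are the usual kernel ones; no task state is touched):
 *
 *	unsigned long msr = mfmsr();
 *
 *	if (msr & MSR_VEC)
 *		return;			// VMX already usable
 *	mtmsr(msr | MSR_VEC);
 */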

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	addi	r7,r3,THREAD_VRSTATE
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX (and VSX) for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr
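
/*
 * Summarized as a hedged C sketch (save_vr_state is illustrative),
 * giveup_altivec saves the VRs plus VSCR, clears MSR_VEC (and MSR_VSX
 * on VSX-capable CPUs) in the task's saved user MSR, and on UP drops
 * lazy ownership:
 *
 *	if (tsk) {
 *		save_vr_state(&tsk->thread.vr_state);
 *		if (tsk->thread.regs)
 *			tsk->thread.regs->msr &= ~(MSR_VEC | MSR_VSX);
 *	}
 *	last_task_used_altivec = NULL;	// !CONFIG_SMP only
 */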

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return
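
/*
 * load_up_vsx therefore cascades: FP state comes in via load_up_fpu,
 * VMX state via load_up_altivec, and only MSR_VSX itself is left to
 * set.  A hedged C sketch of the dispatch (regs stands for the
 * interrupted user context):
 *
 *	if (!(regs->msr & MSR_FP))
 *		load_up_fpu();
 *	if (!(regs->msr & MSR_VEC))
 *		load_up_altivec();
 *	current->thread.used_vsr = 1;
 *	regs->msr |= MSR_VSX;
 */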

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */
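
/*
 * Unlike giveup_altivec, __giveup_vsx saves no registers: VSX has no
 * architected state beyond the FP and VMX register files, so clearing
 * the MSR bit in the task's saved MSR is enough.  In C terms:
 *
 *	if (tsk && tsk->thread.regs)
 *		tsk->thread.regs->msr &= ~MSR_VSX;
 */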

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text

/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr31, fpzero)
	mffs	fr0
	MTFSF_L(fr31)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr0)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr
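
/*
 * Callers must bracket the helpers below with preempt_disable() and
 * preempt_enable() so the borrowed FP registers cannot be context-
 * switched away mid-loop; a usage sketch (the C prototype for vaddfp
 * is assumed, not declared in this file):
 *
 *	preempt_disable();
 *	vaddfp(dst, a, b);		// dst[i] = a[i] + b[i], i = 0..3
 *	preempt_enable();
 */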

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
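
/*
 * At the C level each of these emulation helpers is a four-element
 * scalar loop; a hedged sketch for the add case above (float-pointer
 * arguments are assumed):
 *
 *	void vaddfp(float *dst, const float *a, const float *b)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			dst[i] = a[i] + b[i];
 *	}
 */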

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
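
/*
 * C-level semantics of the two fused helpers above, with the operand
 * order implied by the register usage (r3 = dst, r4 = a, r5 = b,
 * r6 = c):
 *
 *	dst[i] = a[i] * c[i] + b[i];		// vmaddfp
 *	dst[i] = -(a[i] * c[i] - b[i]);		// vnmsubfp
 */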

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
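
/*
 * The update above is the Newton-Raphson step for f(r) = 1/r^2 - s,
 * which gives r' = r + 0.5 * r * (1 - s*r*r); each step roughly
 * doubles the number of accurate bits.  One step in C (illustrative):
 *
 *	static inline float rsqrt_step(float s, float r)
 *	{
 *		return r + 0.5f * r * (1.0f - s * r * r);
 *	}
 */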