#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8
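
@ A compile-time sanity check in the spirit of the S_R0 check below
@ (a sketch; it assumes S_FRAME_SIZE from asm-offsets.h is
@ sizeof(struct pt_regs) plus the 8 bytes above):
#if S_FRAME_SIZE & 7
#error "S_FRAME_SIZE must be a multiple of 8 for EABI"
#endif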

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0			@ start with a zeroed frame pointer
#endif
	.endm

	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign	@ address of the cached control reg value
	ldr	\rtemp, [\rtemp]	@ dereference to get the value itself
	mcr	p15, 0, \rtemp, c1, c0	@ restore the alignment trap setting
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is automatically aligned to 64 bits.
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_fast_exit is used when returning from interrupts.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
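
@ For reference, a sketch of the basic frame the core pushes
@ automatically (offsets assume no FP state and no alignment padding):
@
@	original SP ->	[xPSR            ]	+28
@			[ReturnAddress() ]	+24
@			[LR (R14)        ]	+20
@			[R12             ]	+16
@			[R3              ]	+12
@			[R2              ]	+8
@			[R1              ]	+4
@	new SP      ->	[R0              ]	+0	(8 words, 32 bytes)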

	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the cpu was in when the
	@ exception happened that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used
	tst	lr, #0x4		@ bit 2 set: process stack was used
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the value saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto saved
	@ xPSR. If the FP extension is implemented and bit 4 of EXC_RETURN is 0
	@ then space was allocated for FP state. That is space for 18 32-bit
	@ values. (If FP extension is unimplemented, bit 4 is 1.)
	@ Additionally the cpu might automatically 8-byte align the stack. Bit 9
	@ of the saved xPSR specifies if stack aligning took place. In this case
	@ another 32-bit value is included in the stack.
	tst	lr, #0x10		@ FP state saved if bit 4 is 0
	addeq	r9, r9, #72		@ 18 32-bit values
	tst	r12, #0x200		@ bit 9 of the saved xPSR
	addne	r9, r9, #4		@ the alignment padding word

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store r0 once more and EXC_RETURN
	stmia	r8, {r0, lr}
	.endm
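
@ Worked example (a sketch): for an exception taken with FP state
@ saved (bit 4 of EXC_RETURN clear) and with an alignment pad (bit 9
@ of the saved xPSR set), the original stack pointer is
@	r9 + 18 * 4 + 4 = r9 + 76
@ with r9 pointing just above the auto saved xPSR. Without FP state
@ and padding, the original stack pointer is r9 itself.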

	.macro	v7m_exception_fast_exit
	@ registers r0-r3 and r12 are automatically restored on exception
	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
	@ correctness they don't need to be restored. So only r8-r11 must be
	@ restored here. The easiest way to do so is to restore r0-r7, too.
	ldr	lr, [sp, #S_EXC_RET]	@ reload EXC_RETURN for the return below
	ldmia	sp!, {r0-r11}
	add	sp, #S_FRAME_SIZE-S_IP
	cpsie	i
	bx	lr			@ exception return
	.endm

	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, [sp, #S_EXC_RET]	@ read exception LR
	tst	lr, #0x8		@ bit 3 set: return to thread mode
	bne	1f			@ go to thread mode using exception return

	/*
	 * return to kernel thread
	 * sp is already set up (and might be unset in pt_regs), so only
	 * restore r0-r12 and pc
	 */
	ldmia	sp, {r0-r12}
	ldr	lr, [sp, #S_PC]
	add	sp, sp, #S_FRAME_SIZE
	cpsie	i
	bx	lr

	/*
	 * return to userspace
	 */
1:
	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ handle stack aligning
	tst	r5, #0x200		@ bit 9 of the saved xPSR
	subne	r2, r2, #4

	@ skip over stack space for fp saving
	tst	lr, #0x10		@ FP state saved if bit 4 is 0
	subeq	r2, r2, #72		@ 18 32-bit values

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}	@ saved r12, lr, pc and xPSR
	ldmia	sp, {r1, r3-r5}		@ reload the saved r0-r3
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}	@ keep the live r0 as return value
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ the core unstacks from the process stack on exception return
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr			@ exception return
	.endm
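
@ Usage sketch: the V7M restore_user_regs further below expands to
@	v7m_exception_slow_exit ret_r0 = \fast
@ i.e. the fast syscall-return path keeps the live r0 as the return
@ value while the slow path restores the r0 saved in pt_regs.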

#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
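
@ Usage (a hypothetical example): with r0 pointing at a struct pt_regs,
@	store_user_sp_lr r0, r1, S_SP
@ stores sp_usr and lr_usr at regs->ARM_sp and regs->ARM_lr without
@ needing the banked-register "stm {sp, lr}^" form that Thumb-2 lacks.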

#ifndef CONFIG_THUMB2_KERNEL
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6)
	@ ARMv6 has no clrex; a dummy strex clears the exclusive monitor.
	@ It overwrites the saved r0 slot, so reload r0 first.
	ldr	r0, [sp]
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm

	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc (and point sp at S_PC)
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13		@ clear the low 13 bits of sp:
	mov	\rd, \rd, lsl #13		@ thread_info sits at the stack base
	.endm
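
@ Worked example: with 8K kernel stacks, thread_info lives at the
@ bottom of the stack, so rounding sp down to an 8K boundary finds it:
@	sp = 0xc0a83f2c  ->  (sp >> 13) << 13 = 0xc0a82000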

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg
	.endm

#else	/* CONFIG_THUMB2_KERNEL */
	.macro	svc_exit, rpsr
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
	clrex					@ clear the exclusive monitor
	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!				@ return, restoring pc and cpsr
	.endm
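
@ A sketch of the trick above: stmdb builds {lr, pc, psr} just below
@ the calling sp. Once r0-r12 are reloaded from pt_regs, sp is pointed
@ at that block, the calling lr is popped, and rfeia atomically
@ restores pc and cpsr from the remaining two words.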

#ifdef CONFIG_CPU_V7M
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	.if	\offset
	add	sp, #\offset			@ point sp at the pt_regs base
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
	.endm
#else	/* !CONFIG_CPU_V7M */
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
#endif	/* CONFIG_CPU_V7M */

	.macro	get_thread_info, rd
	mov	\rd, sp				@ sp can't take a shifted operand
	lsr	\rd, \rd, #13			@ in Thumb-2, so shift separately
	mov	\rd, \rd, lsl #13
	.endm

	@
	@ 32-bit wide "mov pc, reg"
	@
	.macro	movw_pc, reg
	mov	pc, \reg			@ 16-bit encoding in Thumb-2
	nop					@ pad to 32 bits total
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info