/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	.macro	arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"
	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
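/*
 * A sketch of what the fast path relies on (the real restore_user_regs
 * macro lives in entry-header.S): with fast = 1, r0 is deliberately not
 * reloaded, since it already holds the syscall return value, roughly
 *
 *	ldmdb	sp, {r1 - lr}^			@ restore r1 - lr only
 *	movs	pc, lr				@ return, restoring CPSR
 */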
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
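/*
 * do_work_pending's return convention, as consumed above: zero means we
 * can head back to user space, non-zero means a syscall must be
 * restarted. A negative value selects sys_restart_syscall (the movlt);
 * otherwise the saved r0 - r6 are reloaded and the original call is
 * replayed via local_restart.
 */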
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	mov	why, #1
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
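/*
 * calls.S is assembled twice: above, CALL() merely bumps NR_syscalls so
 * the entry count is known; below, with CALL(x) redefined to ".long x",
 * the same file emits the actual table entries for sys_call_table (and,
 * with CONFIG_OABI_COMPAT, sys_oabi_call_table).
 */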
#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */
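/*
 * For illustration, a __gnu_mcount_nc call site before and after
 * dynamic ftrace patches it out (a sketch; the patched pair is a net
 * no-op, since the pop simply undoes the push):
 *
 *	push	{lr}				push	{lr}
 *	bl	__gnu_mcount_nc		-->	pop	{lr}
 */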
#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm
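/*
 * Worked example (a sketch, with MCOUNT_INSN_SIZE == 4): an lr of
 * 0x80100007 (Thumb bit set) becomes 0x80100006 after the bic and
 * 0x80100002 after the sub, i.e. the address of the mcount call
 * instruction itself, which is what ftrace records.
 */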
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1: 	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2
2:	mcount_exit
.endm
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm
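/*
 * For reference (see arch/arm/kernel/ftrace.c): prepare_ftrace_return
 * takes (unsigned long *parent, unsigned long self_addr, unsigned long
 * frame_pointer), so r0-r2 above map straight onto its three arguments
 * per the AAPCS.
 */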
#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm
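/*
 * Why [fp, #-4] works here: the instrumented function's APCS prologue
 * was
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * so fp points at the saved pc and fp - 4 at the saved lr: the address
 * the instrumented function will return to (its caller).
 */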
ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm
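/*
 * Stack layout after mcount_enter, explaining the #20 above:
 *
 *	sp + 20: lr of the instrumented function's caller, pushed
 *		 by the call site's "push {lr}"
 *	sp + 16: return address into the instrumented function
 *		 (popped into ip by mcount_exit, then jumped to)
 *	sp + 0 ... sp + 12: saved r0 - r3
 */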
ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
	.align	5
ENTRY(vector_swi)
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp
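	/*
	 * The SVC stack now holds a complete struct pt_regs: r0-r12, the
	 * caller's sp and lr, the return pc, the caller's CPSR, and
	 * OLD_R0 (the original r0, kept for syscall restart handling).
	 */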
	/*
	 * Get the system call number.
	 */
#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif
#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif
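/*
 * For illustration (a sketch): an old ABI call site encodes the number
 * in the swi instruction's comment field,
 *
 *	swi	#(__NR_SYSCALL_BASE + NR)
 *
 * whereas an EABI call site uses a bare swi and passes the number in
 * r7 (scno),
 *
 *	mov	r7, #NR
 *	swi	#0
 *
 * which is why only the old ABI paths above need to read the swi
 * instruction back from [lr, #-4].
 */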
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif
	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
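	/*
	 * The ldrcc above is the dispatch itself: roughly
	 * "goto sys_call_table[scno]" in C terms. Each entry is a 4-byte
	 * pointer (hence lsl #2), and the cc condition from the cmp means
	 * the load is only taken when scno < NR_syscalls; otherwise we
	 * fall through to the out-of-range handling below.
	 */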
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

	adr	lr, BSYM(__sys_trace_return)	@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b
__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall
	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg
/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif
	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE
/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)
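/*
 * In other words (a sketch of the semantics): sys_syscall implements
 * the old syscall(2) indirection. r0 carries the syscall number and
 * r1-r6 its arguments, so when the number is in range everything is
 * shuffled down one slot before redispatching through the same table.
 */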
sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
		add	ip, sp, #S_OFF
		str	ip, [sp, #4]
		b	sys_clone
ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
sys_statfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88
		moveq	r1, #84
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)
/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)
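/*
 * Worked example (a sketch): with 16K pages (PAGE_SHIFT == 14), an
 * off_4k of 8 (a 32K offset) becomes a pgoff of 2 after the
 * lsr #(PAGE_SHIFT - 12); an off_4k of 3 is not page-aligned, trips
 * the PGOFF_MASK test, and returns -EINVAL.
 */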
#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]
		mov	r3, r2
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)
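/*
 * Example of the difference being papered over (a sketch): old ABI
 * user space passes pread64's 64-bit offset in r3+r4, while the
 * EABI-built sys_pread64 expects it 8-byte aligned in the fifth/sixth
 * argument slots (the {r4, r5} pair spilled to the stack in
 * local_restart), hence the "stmia sp, {r3, r4}" above.
 */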
/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif