/*
 * arch/sh/lib/mcount.S
 *
 * Copyright (C) 2008 Paul Mundt
 * Copyright (C) 2008, 2009 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <asm/ftrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

/*
 * MCOUNT_ENTER(): save the SH argument registers r4-r7 and pr before
 * calling into the tracer.
 *
 * After the five pushes r15 has dropped by 20 bytes, so @(20,r15) is
 * the word that was on top of the stack at mcount entry — presumably
 * the parent's return address pushed by the instrumented function's
 * prologue (TODO confirm against the compiler's mcount call sequence).
 * It is loaded into r4 (first C argument), and pr — the call site
 * inside the traced function — into r5 (second C argument).
 */
#define MCOUNT_ENTER()		\
	mov.l	r4, @-r15;	\
	mov.l	r5, @-r15;	\
	mov.l	r6, @-r15;	\
	mov.l	r7, @-r15;	\
	sts.l	pr, @-r15;	\
				\
	mov.l	@(20,r15),r4;	\
	sts	pr, r5

/*
 * MCOUNT_LEAVE(): undo MCOUNT_ENTER() in reverse order and return.
 * The final pop of r4 sits in the delay slot of rts, so it still
 * executes before control returns to the caller.
 */
#define MCOUNT_LEAVE()		\
	lds.l	@r15+, pr;	\
	mov.l	@r15+, r7;	\
	mov.l	@r15+, r6;	\
	mov.l	@r15+, r5;	\
	rts;			\
	 mov.l	@r15+, r4

#ifdef CONFIG_STACK_DEBUG
/*
 * Perform diagnostic checks on the state of the kernel stack.
 *
 * Check for stack overflow. If there is less than 1KB free
 * then it has overflowed.
 *
 * Make sure the stack pointer contains a valid address. Valid
 * addresses for kernel stacks are anywhere after the bss
 * (after _ebss) and anywhere in init_thread_union (init_stack).
 *
 * Register usage: r0 = THREAD_SIZE (rebuilt by shifts, since
 * "mov #imm" only takes an 8-bit immediate), r1 = sp offset within
 * the thread stack, r2 = TI_SIZE + STACK_WARN low-water mark,
 * r3 = scratch.  Clobbers r0-r3 and the T bit.
 */
#define STACK_CHECK()					\
	mov	#(THREAD_SIZE >> 10), r0;		\
	shll8	r0;					\
	shll2	r0;					\
							\
	/* r1 = sp & (THREAD_SIZE - 1) */		\
	mov	#-1, r1;				\
	add	r0, r1;					\
	and	r15, r1;				\
							\
	mov	#TI_SIZE, r3;				\
	mov	#(STACK_WARN >> 8), r2;			\
	shll8	r2;					\
	add	r3, r2;					\
							\
	/* Is the stack overflowing? */			\
	cmp/hi	r2, r1;					\
	bf	stack_panic;				\
							\
	/* If sp > _ebss then we're OK. */		\
	mov.l	.L_ebss, r1;				\
	cmp/hi	r1, r15;				\
	bt	1f;					\
							\
	/* If sp < init_stack, we're not OK. */		\
	mov.l	.L_init_thread_union, r1;		\
	cmp/hs	r1, r15;				\
	bf	stack_panic;				\
							\
	/* If sp > init_stack && sp < _ebss, not OK. */	\
	add	r0, r1;					\
	cmp/hs	r1, r15;				\
	bt	stack_panic;				\
1:
#else
#define STACK_CHECK()
#endif /* CONFIG_STACK_DEBUG */

/*
 * The compiler emits a call to mcount at the entry of every traced
 * function.  Sanity-check the stack, save state, and hand off to the
 * current tracer function.
 */
	.align 2
	.globl	_mcount
	.type	_mcount,@function
	.globl	mcount
	.type	mcount,@function
_mcount:
mcount:
#ifndef CONFIG_DYNAMIC_FTRACE
	/* Bail out early if function tracing has been stopped. */
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub
#endif
	STACK_CHECK()
	MCOUNT_ENTER()

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Call target patched at runtime by the dynamic ftrace code. */
	.globl	mcount_call
mcount_call:
	mov.l	.Lftrace_stub, r6
#else
	/*
	 * Skip the call when no tracer is registered, i.e. when
	 * ftrace_trace_function still points at ftrace_stub.
	 *
	 * NOTE(review): "mov.l ftrace_stub, r7" is a PC-relative load of
	 * the word AT ftrace_stub (instruction bytes), not its address,
	 * so this comparison looks like it can never match; falling
	 * through and calling ftrace_stub via the pointer is harmless.
	 * This matches upstream — verify before changing.
	 */
	mov.l	.Lftrace_trace_function, r6
	mov.l	ftrace_stub, r7
	cmp/eq	r6, r7
	bt	skip_trace
	mov.l	@r6, r6
#endif

	jsr	@r6
	 nop

skip_trace:
	MCOUNT_LEAVE()

	.align 2
.Lftrace_trace_function:
	.long	ftrace_trace_function

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Entry point used once dynamic ftrace has patched call sites; the
 * target of the "ftrace_call" slot is rewritten at runtime.
 */
	.globl	ftrace_caller
ftrace_caller:
	mov.l	.Lfunction_trace_stop, r0
	mov.l	@r0, r0
	tst	r0, r0
	bf	ftrace_stub

	STACK_CHECK()
	MCOUNT_ENTER()

	.globl	ftrace_call
ftrace_call:
	mov.l	.Lftrace_stub, r6
	jsr	@r6
	 nop

	MCOUNT_LEAVE()
#endif /* CONFIG_DYNAMIC_FTRACE */

	/*
	 * Referenced unconditionally by mcount (!DYNAMIC_FTRACE) and by
	 * ftrace_caller (DYNAMIC_FTRACE), so this literal must live
	 * outside the CONFIG_STACK_DEBUG block below — and before the
	 * displacement-fixed region that follows, per the NOTE.
	 */
	.align 2
.Lfunction_trace_stop:
	.long	function_trace_stop

/*
 * NOTE: From here on the locations of the .Lftrace_stub label and
 * ftrace_stub itself are fixed. Adding additional data here will skew
 * the displacement for the memory table and break the block replacement.
 * Place new labels either after the ftrace_stub body, or before
 * ftrace_caller. You have been warned.
 */
	.align 2
.Lftrace_stub:
	.long	ftrace_stub

	.globl	ftrace_stub
ftrace_stub:
	rts
	 nop

#ifdef CONFIG_STACK_DEBUG
/*
 * STACK_CHECK() failure path: dump the stack and panic.  Never
 * returns (panic does not return); the trailing rts is for safety.
 */
	.globl	stack_panic
stack_panic:
	mov.l	.Ldump_stack, r0
	jsr	@r0
	 nop

	mov.l	.Lpanic, r0
	jsr	@r0
	 mov.l	.Lpanic_s, r4	/* panic message pointer in delay slot */

	rts
	 nop

	/* Literal pool for the CONFIG_STACK_DEBUG code above. */
	.align 2
.L_ebss:
	.long	_ebss
.L_init_thread_union:
	.long	init_thread_union
.Lpanic:
	.long	panic
.Lpanic_s:
	.long	.Lpanic_str
.Ldump_stack:
	.long	dump_stack

	.section .rodata
	.align 2
.Lpanic_str:
	.string "Stack error"
#endif /* CONFIG_STACK_DEBUG */