/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks goes to Steven Rostedt for writing the original x86 version.
 */
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25...0 */

#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
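/*
 * For example, INSN_JAL(0x80123400) encodes 0x0c048d00, i.e.
 * "jal 0x80123400"; a jal can only reach targets within the same
 * 256MB region as the call site, which is why module code needs the
 * lui/jalr long-call sequence instead.
 */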
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}
/*
 * Check if the address is in kernel space
 *
 * Clone of core_kernel_text() from kernel/extable.c, but without the
 * init_kernel_text() check: ftrace does not trace functions in init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);
	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);
	return 0;
}
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise the
	 * long-call sequence must be patched out.
	 */
	if (in_kernel_space(ip)) {
		/*
		 * move at, ra
		 * jal _mcount		--> nop
		 */
		new = INSN_NOP;
	} else {
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
		/*
		 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000005)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * move $12, ra_address
		 * jalr v1
		 *  sub sp, sp, 8
		 * 1: offset = 5 instructions
		 */
		new = INSN_B_1F_5;
#else
		/*
		 * lui v1, hi_16bit_of_mcount	--> b 1f (0x10000004)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * jalr v1
		 *  nop | move $12, ra_address | sub sp, sp, 8
		 * 1: offset = 4 instructions
		 */
		new = INSN_B_1F_4;
#endif
	}

	return ftrace_modify_code(ip, new);
}
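/*
 * Re-patch the call site with a call into the tracer: "jal ftrace_caller"
 * for kernel text, or the first instruction of the long-call sequence
 * ("lui v1, hi16(_mcount)") for module text; both words were pre-encoded
 * in ftrace_dyn_arch_init_insns().
 */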
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}
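/*
 * ftrace_call is the patch site inside the assembly ftrace_caller (see
 * mcount.S) where the "jal" to the current tracer function lives;
 * ftrace_update_ftrace_func() rewrites it whenever the tracer changes.
 */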
#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif	/* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
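/*
 * ftrace_graph_call is a nop slot in the assembly tracer (mcount.S);
 * enabling the graph caller patches it to "j ftrace_graph_caller",
 * disabling it restores the nop.
 */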
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS
#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
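/*
 * Without -mmcount-ra-address we have to discover where the traced function
 * saved its ra: walk back from the mcount call site through the prologue
 * stores until an "s{d,w} ra, offset(sp)" is found.  For example, 0xafbf0020
 * is "sw ra, 32(sp)", so (code & OFFSET_MASK) gives 0x20, the stack offset
 * of the saved ra.
 */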
unsigned long ftrace_get_parent_addr(unsigned long self_addr,
				     unsigned long parent,
				     unsigned long parent_addr,
				     unsigned long fp)
{
	unsigned long sp, ip, ra;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip from the calling site of mcount to after the
	 * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
	 * kernel, move it to after the instruction "move ra, at" (offset is
	 * 16).
	 */
	ip = self_addr - (in_kernel_space(self_addr) ? 16 : 24);

	/*
	 * Search the text until we find either a non-store instruction or
	 * the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * ra is stored, then this is a leaf function and it does
		 * not store ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_addr;

		/* Move to the next instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

	sp = fp + (code & OFFSET_MASK);

	/* ra = *(unsigned long *)sp; */
	safe_load_stack(ra, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (ra == parent)
		return sp;
	return 0;
}
#endif	/* !KBUILD_MCOUNT_RA_ADDRESS */
/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long fp)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent" is the stack address that holds the return address of
	 * the caller of _mcount.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack that directly; a non-leaf function does save the return
	 * address in its own stack space, which we cannot hijack directly,
	 * so we need to find the real stack address, which is what
	 * ftrace_get_parent_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is passed
	 * to us in $12, and for a leaf function a zero is put in $12; this
	 * is done in ftrace_graph_caller of mcount.S.
	 */

	/* old = *parent; */
	safe_load_stack(old, parent, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
			(unsigned long)parent, fp);
	/*
	 * If getting the stack address of the non-leaf function's ra fails,
	 * stop the function graph tracer and return.
	 */
	if (parent == NULL)
		goto out;
#endif

	/* *parent = return_hooker; */
	safe_store_stack(return_hooker, parent, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
	    -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif	/* CONFIG_FUNCTION_GRAPH_TRACER */