/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>

void ftrace_disable_code(void);
void ftrace_enable_insn(void);

#ifdef CONFIG_64BIT
/*
 * The 64-bit mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	larl	%r1,<&counter>		# offset 6
 * >	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. The middle two instructions of the mcount
 * block get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 64-bit enabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The return points of the mcount/ftrace function have the same offset 18.
 * The 64-bit disabled ftrace code block looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 * >	jg	.+18			# offset 6
 * >	lgr	%r0,%r0			# offset 12
 * >	basr	%r14,%r1		# offset 16
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */
50 "ftrace_disable_code:\n"
56 "ftrace_enable_insn:\n"
57 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
59 #define FTRACE_INSN_SIZE 6
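
/*
 * Illustrative sketch, not part of the original file: the enabled and the
 * disabled code block above differ only in the first 6-byte instruction at
 * offset 6, which is why FTRACE_INSN_SIZE is 6 here.  Given an address that
 * points at offset 6 of the mcount block (like rec->ip below), the state of
 * a call site can therefore be derived from those bytes alone.  The helper
 * name is hypothetical.
 */
#if 0	/* documentation sketch, not compiled */
static bool call_site_enabled_example(unsigned long ip)
{
	/* compare the first instruction against the enable template above */
	return memcmp((void *) ip, ftrace_enable_insn, FTRACE_INSN_SIZE) == 0;
}
#endif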

#else /* CONFIG_64BIT */
/*
 * The 31-bit mcount code looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	bras	%r1,0f			# offset 4
 * >	.long	_mcount			# offset 8
 * >	.long	<&counter>		# offset 12
 * > 0:	l	%r14,0(%r1)		# offset 16
 * >	l	%r1,4(%r1)		# offset 20
 *	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * Total length is 30 bytes. The twenty bytes starting from offset 4
 * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
 * The 31-bit enabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The return points of the mcount/ftrace function have the same offset 26.
 * The 31-bit disabled ftrace code block looks like this:
 *	st	%r14,4(%r15)		# offset 0
 * >	j	.+26			# offset 4
 * >	j	0f			# offset 8
 * >	.fill	12,1,0x07		# offset 12
 *   0:	basr	%r14,%r14		# offset 24
 *	l	%r14,4(%r15)		# offset 26
 * The j instruction branches to offset 30 to skip as many instructions
 * as possible.
 */
94 "ftrace_disable_code:\n"
101 "ftrace_enable_insn:\n"
102 " l %r14,"__stringify(__LC_FTRACE_FUNC)"\n");
104 #define FTRACE_INSN_SIZE 4
106 #endif /* CONFIG_64BIT */
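
/*
 * Illustrative sketch, not part of the original file: at run time an enabled
 * call site behaves roughly like the C below.  The tracer address is loaded
 * from the lowcore field at offset __LC_FTRACE_FUNC on every call, which is
 * why switching the active tracer does not require patching the call sites
 * themselves (see ftrace_update_ftrace_func below).  The lowcore_ftrace_func()
 * helper, the function pointer type and the argument convention are
 * assumptions made for illustration only.
 */
#if 0	/* documentation sketch, not compiled */
static void enabled_call_site_example(unsigned long ip, unsigned long parent)
{
	void (*tracer)(unsigned long, unsigned long);

	/* corresponds to the lg/l from __LC_FTRACE_FUNC in the blocks above */
	tracer = (void (*)(unsigned long, unsigned long)) lowcore_ftrace_func();
	tracer(ip, parent);	/* corresponds to the basr in the blocks above */
}
#endif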

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
			       MCOUNT_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
			       FTRACE_INSN_SIZE))
		return -EPERM;
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
					      unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
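
/*
 * Illustrative sketch, not part of the original file: the assembler stub
 * ftrace_graph_caller (in the mcount assembler code, not part of this file)
 * is expected to use the function above roughly like the C below, replacing
 * the saved return address with return_to_handler while tracing is active.
 * The name of the save slot parameter is an assumption for illustration only.
 */
#if 0	/* documentation sketch, not compiled */
static void ftrace_graph_caller_example(unsigned long *ra_slot,
					unsigned long ip)
{
	*ra_slot = prepare_ftrace_return(*ra_slot, ip);
}
#endif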

/*
 * Patch the kernel code at the ftrace_graph_caller location. In both cases
 * the instruction there is a relative branch whose 16-bit halfword offset
 * field at ftrace_graph_caller + 2 gets rewritten.
 * On 64-bit the branch skips the function graph code block when tracing is
 * disabled: to enable the call to prepare_ftrace_return the offset is
 * patched to 2, so the branch simply falls through to the next instruction;
 * to disable the call the offset is patched to reach ftrace_graph_caller_end
 * again.
 * On 31-bit the instruction is a branch relative and save to
 * prepare_ftrace_return: to disable the call the bras offset is patched to
 * point directly after the instruction; to enable the call the original
 * offset to prepare_ftrace_return is calculated and put back.
 */

#ifdef CONFIG_64BIT

int ftrace_enable_ftrace_graph_caller(void)
{
	static unsigned short offset = 0x0002;

	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) &ftrace_graph_caller_end -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#else /* CONFIG_64BIT */

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short offset;

	offset = ((void *) prepare_ftrace_return -
		  (void *) ftrace_graph_caller) / 2;
	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static unsigned short offset = 0x0002;

	return probe_kernel_write((void *) ftrace_graph_caller + 2,
				  &offset, sizeof(offset));
}

#endif /* CONFIG_64BIT */
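
/*
 * Illustrative sketch, not part of the original file: the 16-bit field
 * patched above is the relative offset of a branch instruction, counted in
 * halfwords.  An offset of 0x0002 therefore targets the instruction directly
 * after the 4-byte branch, while (target - branch) / 2 yields the offset
 * needed to reach a target such as prepare_ftrace_return or
 * ftrace_graph_caller_end.  The helper name is hypothetical.
 */
#if 0	/* documentation sketch, not compiled */
static unsigned short branch_offset_halfwords_example(void *branch, void *target)
{
	/* relative branch offsets on s390 are counted in 2-byte halfwords */
	return (unsigned short) ((target - branch) / 2);
}
#endif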
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */