/* arch/mips/kernel/ftrace.c */
1 /*
2  * Code for replacing ftrace calls with jumps.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
6  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
7  *
8  * Thanks goes to Steven Rostedt for writing the original x86 version.
9  */
10
11 #include <linux/uaccess.h>
12 #include <linux/init.h>
13 #include <linux/ftrace.h>
14
15 #include <asm/asm.h>
16 #include <asm/asm-offsets.h>
17 #include <asm/cacheflush.h>
18 #include <asm/uasm.h>
19
20 #ifdef CONFIG_DYNAMIC_FTRACE
21
#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */

#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
#define INSN_NOP 0x00000000	/* nop */
/*
 * Encode "jal addr": drop the two alignment bits and keep the low 26 bits
 * of the word address in the J-type target field.
 */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

/* Patch instructions, encoded once at boot by ftrace_dyn_arch_init_insns() */
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
34
/*
 * Pre-encode, via uasm, the instructions that the patching routines below
 * write into call sites:
 *  - "lui v1, hi16(_mcount)"   re-enables a module (-mlong-calls) call site
 *  - "jal ftrace_caller + 8"   enables a kernel call site; the +8 skips the
 *                              first two instructions of ftrace_caller
 *  - "j ftrace_graph_caller"   enables the function-graph tracer hook
 */
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;			/* register $3 is v1 */
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instruction */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}
55
56 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
57 {
58         int faulted;
59
60         /* *(unsigned int *)ip = new_code; */
61         safe_store_code(new_code, ip, faulted);
62
63         if (unlikely(faulted))
64                 return -EFAULT;
65
66         flush_icache_range(ip, ip + 8);
67
68         return 0;
69 }
70
/*
 * Disable tracing of one call site: replace the instruction at rec->ip
 * with a nop (kernel site) or a branch over the whole -mlong-calls call
 * sequence (module site).  @mod and @addr are unused here; the patched
 * instruction depends only on which address space rec->ip lives in.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * We have compiled module with -mlong-calls, but compiled the kernel
	 * without it, we need to cope with them respectively.
	 * (bit 30 distinguishes module space 0xc0000000 from kernel space
	 * 0x80000000 — see the matching test in ftrace_make_call())
	 */
	if (ip & 0x40000000) {
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
		/*
		 * Module call sequence is 6 instructions here, so branch
		 * over 5 of them:
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * move $12, ra_address
		 * jalr v1
		 *  sub sp, sp, 8
		 *                                  1: offset = 5 instructions
		 */
		new = INSN_B_1F_5;
#else
		/*
		 * Module call sequence is 5 instructions, branch over 4:
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * jalr v1
		 *  nop | move $12, ra_address | sub sp, sp, 8
		 *                                  1: offset = 4 instructions
		 */
		new = INSN_B_1F_4;
#endif
	} else {
		/*
		 * Kernel site is a short call; just kill the jal:
		 * move at, ra
		 * jal _mcount          --> nop
		 */
		new = INSN_NOP;
	}
	return ftrace_modify_code(ip, new);
}
113
114 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
115 {
116         unsigned int new;
117         unsigned long ip = rec->ip;
118
119         /* ip, module: 0xc0000000, kernel: 0x80000000 */
120         new = (ip & 0x40000000) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
121
122         return ftrace_modify_code(ip, new);
123 }
124
125 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
126
127 int ftrace_update_ftrace_func(ftrace_func_t func)
128 {
129         unsigned int new;
130
131         new = INSN_JAL((unsigned long)func);
132
133         return ftrace_modify_code(FTRACE_CALL_IP, new);
134 }
135
/*
 * One-time boot setup for dynamic ftrace: pre-encode the patch
 * instructions and nop out the "b ftrace_stub" short-circuit in _mcount.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
149 #endif  /* CONFIG_DYNAMIC_FTRACE */
150
151 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
152
153 #ifdef CONFIG_DYNAMIC_FTRACE
154
/* Patch site inside ftrace_caller (defined in mcount.S) */
extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP	((unsigned long)(&ftrace_graph_call))

/* Turn the graph-tracer hook on: write "j ftrace_graph_caller" at the site */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

/* Turn the graph-tracer hook off again by restoring a nop */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}
168
169 #endif  /* CONFIG_DYNAMIC_FTRACE */
170
171 #ifndef KBUILD_MCOUNT_RA_ADDRESS
172
#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */

/*
 * Find the stack slot where the traced function saved its return address,
 * by scanning its prologue backwards from the mcount call site for a
 * "s{d,w} ra, offset(sp)" store.
 *
 * @self_addr:   address of the mcount call site in the traced function
 * @parent:      the return-address value currently expected in that slot
 * @parent_addr: fallback address (mcount's emulated slot) for leaf functions
 * @fp:          traced function's stack pointer at entry
 *
 * Returns the stack address holding ra, @parent_addr for a leaf function
 * that never saved ra, or 0 on any fault / sanity-check failure.
 */
unsigned long ftrace_get_parent_addr(unsigned long self_addr,
				     unsigned long parent,
				     unsigned long parent_addr,
				     unsigned long fp)
{
	unsigned long sp, ip, ra;
	unsigned int code;
	int faulted;

	/*
	 * For module, move the ip from calling site of mcount to the
	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
	 * kernel, move to the instruction "move ra, at"(offset is 12)
	 */
	ip = self_addr - ((self_addr & 0x40000000) ? 20 : 12);

	/*
	 * search the text until finding the non-store instruction or "s{d,w}
	 * ra, offset(sp)" instruction
	 */
	do {
		ip -= 4;

		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit the non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_addr;

	} while (((code & S_RA_SP) != S_RA_SP));

	/* ra's slot: base sp plus the 16-bit offset from the store insn */
	sp = fp + (code & OFFSET_MASK);

	/* ra = *(unsigned long *)sp; (fault-safe) */
	safe_load_stack(ra, sp, faulted);
	if (unlikely(faulted))
		return 0;

	/* Sanity check: the slot must still hold the expected caller address */
	if (ra == parent)
		return sp;
	return 0;
}
226
227 #endif  /* !KBUILD_MCOUNT_RA_ADDRESS */
228
229 /*
230  * Hook the return address and push it in the stack of return addrs
231  * in current thread info.
232  */
/*
 * Hook the traced function's return address: save the real caller address
 * on the per-task return stack and replace it with return_to_handler, so
 * the graph tracer also sees function exit.
 *
 * @parent:    stack address holding the return address of _mcount's caller
 * @self_addr: address of the traced function (mcount call site)
 * @fp:        traced function's stack pointer, used for slot lookup and
 *             as the frame check passed to ftrace_push_return_trace()
 *
 * All stack accesses are fault-guarded; any fault stops the graph tracer.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long fp)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent" is the stack address that holds the return address of
	 * the caller of _mcount.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * in the stack address, so we "emulate" one in _mcount's stack space,
	 * and hijack it directly; but for a non-leaf function, it saves the
	 * return address in its own stack space, so we can not hijack it
	 * directly, but need to find the real stack address —
	 * ftrace_get_parent_addr() does it!
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function the location of the return address will be saved
	 * in $12 for us, and for a leaf function only a zero is put in $12;
	 * we do it in ftrace_graph_caller of mcount.S.
	 */

	/* old = *parent; (fault-safe) */
	safe_load_stack(old, parent, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
			(unsigned long)parent, fp);
	/*
	 * If it fails when getting the stack address of the non-leaf
	 * function's ra, stop the function graph tracer and return
	 */
	if (parent == 0)
		goto out;
#endif
	/* *parent = return_hooker; (fault-safe) */
	safe_store_stack(return_hooker, parent, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
	    -EBUSY) {
		/* return stack full: undo the hijack (unguarded store —
		 * parent was just written successfully above) */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
299 #endif  /* CONFIG_FUNCTION_GRAPH_TRACER */