kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
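/*
 * The stack tracer hooks every function entry via ftrace and records
 * the deepest kernel stack usage seen so far, along with the stack
 * trace that produced it.  It is controlled through
 * /proc/sys/kernel/stack_tracer_enabled (or the "stacktrace" kernel
 * command line option) and reports through the tracing debugfs
 * directory (typically /sys/kernel/debug/tracing):
 *
 *   stack_max_size     - deepest stack usage recorded, in bytes
 *   stack_trace        - the stack trace that hit that maximum
 *   stack_trace_filter - limit which functions are checked
 */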
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

/*
 * If fentry is used, then the function being traced will
 * jump to fentry directly before it sets up its stack frame.
 * We need to ignore that one and record the parent. Since
 * the stack frame for the traced function wasn't set up yet,
 * the stack_trace won't see the parent. That needs to be added
 * manually to stack_dump_trace[] as the first element.
 */
#ifdef CC_USING_FENTRY
# define add_func       1
#else
# define add_func       0
#endif

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES - add_func,
        .entries                = &stack_dump_trace[add_func],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

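/*
 * Per-CPU recursion guard: non-zero while the stack tracer is already
 * running on this CPU, so that functions called from the tracer itself
 * do not recurse back into it.
 */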
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

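/*
 * check_stack() computes how much of the current thread stack is in
 * use (THREAD_SIZE minus the offset of @stack within it).  If that
 * beats the recorded maximum, it saves a fresh stack trace and then
 * walks the stack words to work out at which depth each traced return
 * address lives, filling in stack_dump_index[].
 */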
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * When fentry is used, the traced function does not get
         * its stack frame set up, and we lose the parent.
         * Add that one in manually. We set up save_stack_trace()
         * to not touch the first element in this case.
         */
        if (add_func) {
                stack_dump_trace[0] = ip;
                max_stack_trace.nr_entries++;
        }

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. An entry may be missing
         * from the stack for some reason, so we may have to account
         * for that. If all entries are found, this loop will only
         * run once. This code only runs on a new max, so it is
         * far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                if (!found)
                        i++;
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

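/*
 * stack_trace_call() is the ftrace callback invoked on every traced
 * function entry.  It uses the address of the local variable 'stack'
 * to locate the current position on the stack, and the per-CPU
 * trace_active counter to keep the tracer from recursing into itself.
 */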
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, this variable is only modified on this CPU */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack(parent_ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

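/*
 * Handlers for the stack_max_size debugfs file: reads report the
 * current maximum, and writes set it under max_stack_lock (writing 0
 * resets the recorded maximum).
 */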
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * If we trace inside arch_spin_lock(), or in an NMI that fires
         * after the lock is taken, we would deadlock on max_stack_lock,
         * so we also need to increase the per-CPU trace_active count
         * here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

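/*
 * seq_file iterator for the stack_trace file.  Position 0 produces the
 * header (SEQ_START_TOKEN); position n+1 maps to stack_dump_trace[n].
 * t_start() takes max_stack_lock (with trace_active bumped, for the
 * same deadlock reason as above) and t_stop() releases it.
 */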
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

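/*
 * stack_dump_index[i] is the depth (bytes from the top of the stack)
 * at which entry i's return address was found, so the size of frame i
 * is the difference between its depth and the next entry's depth.
 */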
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries - 1);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
};

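/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: when the value
 * actually changes, register or unregister the ftrace callback
 * accordingly.
 */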
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

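/*
 * Boot-time setup: "stacktrace" on the kernel command line enables the
 * tracer, and "stacktrace_filter=<funcs>" additionally stashes a filter
 * string that stack_trace_init() hands to ftrace later on.
 */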
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

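/*
 * Create the debugfs control files in the tracing directory and, if the
 * tracer was enabled on the command line, register the callback now.
 */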
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);