/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500
/*
 * If fentry is used, then the function being traced will
 * jump to fentry directly before it sets up its stack frame.
 * We need to ignore that one and record the parent. Since
 * the stack frame for the traced function wasn't set up yet,
 * the stack_trace won't see the parent. That needs to be added
 * manually to stack_dump_trace[] as the first element.
 */
#ifdef CC_USING_FENTRY
# define add_func	1
#else
# define add_func	0
#endif
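
/*
 * Illustrative calling sequences (a sketch, not specific to any one
 * architecture):
 *
 *   mcount: func() first sets up its stack frame and then calls the
 *           tracer, so a stack walk can still find func's parent on
 *           the stack.
 *   fentry: the call to the tracer is the very first instruction of
 *           func(), before any frame exists, so the walk misses the
 *           parent and it has to be injected by hand in
 *           check_stack() below.
 */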

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - add_func,
	.entries		= &stack_dump_trace[add_func],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
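
/*
 * Note: a raw arch_spinlock_t is used rather than a normal spinlock,
 * presumably because this lock is taken from inside the function
 * tracer callback itself; the ordinary spinlock wrappers carry
 * tracing and lock-debugging hooks that would recurse here.
 */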

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
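
	/*
	 * Worked example (numbers purely illustrative): with
	 * THREAD_SIZE = 8192 and a stack address sitting 2656 bytes
	 * above the bottom of the thread stack, the mask yields
	 * this_size = 2656, and the subtraction turns that into
	 * 8192 - 2656 = 5536 bytes of stack actually in use.
	 */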

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;
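
	/*
	 * Note: skip = 3 is meant to drop the innermost frames added
	 * by the tracer itself (the ftrace callback chain down to
	 * save_stack_trace()); the exact count is empirical.
	 */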

	save_stack_trace(&max_stack_trace);

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * Add that one in manually. We set up save_stack_trace()
	 * to not touch the first element in this case.
	 */
	if (add_func) {
		stack_dump_trace[0] = ip;
		max_stack_trace.nr_entries++;
	}

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missing from the stack, so we may
	 * have to account for that. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
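	/*
	 * Sketch of the walk (addresses hypothetical): if
	 * save_stack_trace() recorded A, B and C, the scan moves a
	 * cursor from the deepest stack word toward the top, matching
	 * each return address in turn; (top - p) * sizeof(long) then
	 * becomes that entry's depth as shown in the stack_trace file.
	 */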
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack(parent_ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func		= stack_trace_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we would cause a circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
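
/*
 * Example shell usage (paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *
 * Writing 0 resets the recorded maximum so that a fresh high-water
 * mark can be captured.
 */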

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
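
/*
 * The resulting stack_trace output looks like this (entries and
 * numbers are illustrative only):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4360      64   _raw_spin_lock+0x5/0x30
 *   1)     4296     104   vfs_read+0x9b/0x160
 *   2)     4192    4192   sys_read+0x4d/0xa0
 *
 * Each Size is the difference between adjacent Depth values, i.e.
 * the stack consumed by that one frame.
 */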

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open		= stack_trace_filter_open,
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= ftrace_filter_lseek,
	.release	= ftrace_regex_release,
};
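
/*
 * Example: restrict the stack tracer to a subset of functions using
 * the usual ftrace glob syntax (path assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */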

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
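
/*
 * Example kernel command line usage:
 *
 *   stacktrace
 *   stacktrace_filter=vfs_*
 *
 * __setup() matches on the "stacktrace" prefix, so the handler above
 * sees "" or "_filter=vfs_*" respectively; the second form also seeds
 * the function filter before ftrace is fully initialized.
 */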

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
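
/*
 * Typical session (paths and output illustrative; debugfs assumed
 * mounted at /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   ... let the workload run for a while ...
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   4360
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */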