/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};
unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;
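
/*
 * A brief usage sketch (paths assume debugfs is mounted at
 * /sys/kernel/debug; adjust for your setup):
 *
 *   echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   cat /sys/kernel/debug/tracing/stack_trace
 *   cat /sys/kernel/debug/tracing/stack_max_size
 *
 * Alternatively, boot with "stacktrace" (optionally
 * "stacktrace_filter=<function-list>") on the kernel command line.
 */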
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
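
/*
 * Summary of the logic below: check_stack() computes how much of the
 * THREAD_SIZE stack lies below the passed in stack pointer.  Only when
 * that beats the recorded maximum does it take stack_trace_max_lock,
 * save a new stack trace, and walk the stack words to match each saved
 * return address to its location, recording in stack_trace_index[] the
 * stack size remaining at each frame.
 */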
/*
 * When arch-specific code overrides this function, the following
 * data should be filled up, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;
	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);
	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;
		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}
		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
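
/*
 * Note: registering trace_ops hooks stack_trace_call() into every
 * function traced by ftrace.  FTRACE_OPS_FL_RECURSION_SAFE indicates
 * that the callback does its own recursion protection (the per-cpu
 * disable_stack_tracer counter above).
 */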
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}
static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};
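
/*
 * Since stack_max_size is writable, the recorded maximum can be reset
 * from user space (e.g. "echo 0 > stack_max_size") so that a new
 * maximum will be captured on the next deep call chain.
 */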
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}
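
/*
 * The seq_file iterator below holds stack_trace_max_lock (and bumps
 * disable_stack_tracer) from t_start() to t_stop(), so the snapshot in
 * stack_dump_trace[]/stack_trace_index[] stays consistent while it is
 * being printed.
 */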
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}
static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
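
/*
 * stack_trace_filter reuses ftrace's filter machinery on trace_ops, so
 * only the listed functions trigger the stack check, e.g.:
 *
 *   echo 'sys_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */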
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;
464 trace_create_file("stack_max_size", 0644, d_tracer,
465 &stack_trace_max_size, &stack_max_size_fops);
467 trace_create_file("stack_trace", 0444, d_tracer,
468 NULL, &stack_trace_fops);
470 trace_create_file("stack_trace_filter", 0444, d_tracer,
471 NULL, &stack_trace_filter_fops);
473 if (stack_trace_filter_buf[0])
474 ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);