/*
 * trace stack traces
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2004, 2005, Soeren Sandmann
 */
8 #include <linux/kallsyms.h>
9 #include <linux/debugfs.h>
10 #include <linux/hrtimer.h>
11 #include <linux/uaccess.h>
12 #include <linux/ftrace.h>
13 #include <linux/module.h>
14 #include <linux/irq.h>
19 static struct trace_array *sysprof_trace;
20 static int __read_mostly tracer_enabled;
25 static const unsigned long sample_period = 1000000;
28 * Per CPU hrtimers that do the profiling:
30 static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
33 const void __user *next_fp;
34 unsigned long return_address;
37 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
39 if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
42 if (__copy_from_user_inatomic(frame, frame_pointer, sizeof(*frame)))
48 #define SYSPROF_MAX_ADDRESSES 512
50 static void timer_notify(struct pt_regs *regs, int cpu)
52 const void __user *frame_pointer;
53 struct trace_array_cpu *data;
54 struct stack_frame frame;
55 struct trace_array *tr;
64 is_user = user_mode(regs);
66 if (!current || current->pid == 0)
69 if (is_user && current->state != TASK_RUNNING)
74 ftrace(tr, data, current->pid, 1, 0);
79 trace_special(tr, data, 0, current->pid, regs->ip);
81 frame_pointer = (void __user *)regs->bp;
83 for (i = 0; i < SYSPROF_MAX_ADDRESSES; i++) {
84 if (!copy_stack_frame(frame_pointer, &frame))
86 if ((unsigned long)frame_pointer < regs->sp)
89 trace_special(tr, data, 1, frame.return_address,
90 (unsigned long)frame_pointer);
91 frame_pointer = frame.next_fp;
94 trace_special(tr, data, 2, current->pid, i);
96 if (i == SYSPROF_MAX_ADDRESSES)
97 trace_special(tr, data, -1, -1, -1);
100 static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
103 timer_notify(get_irq_regs(), smp_processor_id());
105 hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
107 return HRTIMER_RESTART;
110 static void start_stack_timer(int cpu)
112 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
114 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
115 hrtimer->function = stack_trace_timer_fn;
116 hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
118 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
121 static void start_stack_timers(void)
123 cpumask_t saved_mask = current->cpus_allowed;
126 for_each_online_cpu(cpu) {
127 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
128 start_stack_timer(cpu);
129 printk(KERN_INFO "started sysprof timer on cpu%d\n", cpu);
131 set_cpus_allowed_ptr(current, &saved_mask);
134 static void stop_stack_timer(int cpu)
136 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
138 hrtimer_cancel(hrtimer);
139 printk(KERN_INFO "cancelled sysprof timer on cpu%d\n", cpu);
142 static void stop_stack_timers(void)
146 for_each_online_cpu(cpu)
147 stop_stack_timer(cpu);
150 static notrace void stack_reset(struct trace_array *tr)
154 tr->time_start = ftrace_now(tr->cpu);
156 for_each_online_cpu(cpu)
157 tracing_reset(tr->data[cpu]);
160 static notrace void start_stack_trace(struct trace_array *tr)
163 start_stack_timers();
167 static notrace void stop_stack_trace(struct trace_array *tr)
173 static notrace void stack_trace_init(struct trace_array *tr)
178 start_stack_trace(tr);
181 static notrace void stack_trace_reset(struct trace_array *tr)
184 stop_stack_trace(tr);
187 static void stack_trace_ctrl_update(struct trace_array *tr)
189 /* When starting a new trace, reset the buffers */
191 start_stack_trace(tr);
193 stop_stack_trace(tr);
196 static struct tracer stack_trace __read_mostly =
199 .init = stack_trace_init,
200 .reset = stack_trace_reset,
201 .ctrl_update = stack_trace_ctrl_update,
202 #ifdef CONFIG_FTRACE_SELFTEST
203 .selftest = trace_selftest_startup_sysprof,
207 __init static int init_stack_trace(void)
209 return register_tracer(&stack_trace);
211 device_initcall(init_stack_trace);