/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 */
7 #include <linux/module.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/marker.h>
13 #include <linux/ftrace.h>
17 static struct trace_array *ctx_trace;
18 static int __read_mostly tracer_enabled;
21 ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
23 struct trace_array *tr = ctx_trace;
24 struct trace_array_cpu *data;
32 local_irq_save(flags);
33 cpu = raw_smp_processor_id();
35 disabled = atomic_inc_return(&data->disabled);
37 if (likely(disabled == 1)) {
38 tracing_sched_switch_trace(tr, data, prev, next, flags);
39 if (trace_flags & TRACE_ITER_SCHED_TREE)
40 ftrace_all_fair_tasks(__rq, tr, data);
43 atomic_dec(&data->disabled);
44 local_irq_restore(flags);
48 wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
50 struct trace_array *tr = ctx_trace;
51 struct trace_array_cpu *data;
59 local_irq_save(flags);
60 cpu = raw_smp_processor_id();
62 disabled = atomic_inc_return(&data->disabled);
64 if (likely(disabled == 1)) {
65 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
66 if (trace_flags & TRACE_ITER_SCHED_TREE)
67 ftrace_all_fair_tasks(__rq, tr, data);
70 atomic_dec(&data->disabled);
71 local_irq_restore(flags);
75 ftrace_ctx_switch(void *__rq, struct task_struct *prev,
76 struct task_struct *next)
78 tracing_record_cmdline(prev);
81 * If tracer_switch_func only points to the local
82 * switch func, it still needs the ptr passed to it.
84 ctx_switch_func(__rq, prev, next);
87 * Chain to the wakeup tracer (this is a NOP if disabled):
89 wakeup_sched_switch(prev, next);
93 ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
94 struct task_struct *curr)
96 tracing_record_cmdline(curr);
98 wakeup_func(__rq, wakee, curr);
101 * Chain to the wakeup tracer (this is a NOP if disabled):
103 wakeup_sched_wakeup(wakee, curr);
106 static void sched_switch_reset(struct trace_array *tr)
110 tr->time_start = ftrace_now(tr->cpu);
112 for_each_online_cpu(cpu)
113 tracing_reset(tr->data[cpu]);
116 static void start_sched_trace(struct trace_array *tr)
118 sched_switch_reset(tr);
122 static void stop_sched_trace(struct trace_array *tr)
127 static void sched_switch_trace_init(struct trace_array *tr)
132 start_sched_trace(tr);
135 static void sched_switch_trace_reset(struct trace_array *tr)
138 stop_sched_trace(tr);
141 static void sched_switch_trace_ctrl_update(struct trace_array *tr)
143 /* When starting a new trace, reset the buffers */
145 start_sched_trace(tr);
147 stop_sched_trace(tr);
150 static struct tracer sched_switch_trace __read_mostly =
152 .name = "sched_switch",
153 .init = sched_switch_trace_init,
154 .reset = sched_switch_trace_reset,
155 .ctrl_update = sched_switch_trace_ctrl_update,
156 #ifdef CONFIG_FTRACE_SELFTEST
157 .selftest = trace_selftest_startup_sched_switch,
161 __init static int init_sched_switch_trace(void)
163 return register_tracer(&sched_switch_trace);
165 device_initcall(init_sched_switch_trace);