git.karo-electronics.de Git - karo-tx-linux.git/blob - kernel/trace/trace_sched_switch.c
ftrace: fix cmdline tracing
[karo-tx-linux.git] / kernel / trace / trace_sched_switch.c
1 /*
2  * trace context switch
3  *
4  * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/marker.h>
13 #include <linux/ftrace.h>
14
15 #include "trace.h"
16
/* trace_array that the sched-switch/wakeup callbacks record into */
static struct trace_array	*ctx_trace;
/* non-zero while the sched_switch tracer is actively recording */
static int __read_mostly	tracer_enabled;
19
20 static void
21 ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
22 {
23         struct trace_array *tr = ctx_trace;
24         struct trace_array_cpu *data;
25         unsigned long flags;
26         long disabled;
27         int cpu;
28
29         if (!tracer_enabled)
30                 return;
31
32         tracing_record_cmdline(prev);
33
34         local_irq_save(flags);
35         cpu = raw_smp_processor_id();
36         data = tr->data[cpu];
37         disabled = atomic_inc_return(&data->disabled);
38
39         if (likely(disabled == 1)) {
40                 tracing_sched_switch_trace(tr, data, prev, next, flags);
41                 if (trace_flags & TRACE_ITER_SCHED_TREE)
42                         ftrace_all_fair_tasks(__rq, tr, data);
43         }
44
45         atomic_dec(&data->disabled);
46         local_irq_restore(flags);
47 }
48
49 static void
50 wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
51 {
52         struct trace_array *tr = ctx_trace;
53         struct trace_array_cpu *data;
54         unsigned long flags;
55         long disabled;
56         int cpu;
57
58         if (!tracer_enabled)
59                 return;
60
61         tracing_record_cmdline(curr);
62
63         local_irq_save(flags);
64         cpu = raw_smp_processor_id();
65         data = tr->data[cpu];
66         disabled = atomic_inc_return(&data->disabled);
67
68         if (likely(disabled == 1)) {
69                 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
70                 if (trace_flags & TRACE_ITER_SCHED_TREE)
71                         ftrace_all_fair_tasks(__rq, tr, data);
72         }
73
74         atomic_dec(&data->disabled);
75         local_irq_restore(flags);
76 }
77
/*
 * Scheduler hook for context switches: feed the event to this tracer
 * first, then chain to the wakeup-latency tracer.
 */
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
93
/*
 * Scheduler hook for task wakeups: feed the event to this tracer
 * first, then chain to the wakeup-latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
105
106 static void sched_switch_reset(struct trace_array *tr)
107 {
108         int cpu;
109
110         tr->time_start = ftrace_now(tr->cpu);
111
112         for_each_online_cpu(cpu)
113                 tracing_reset(tr->data[cpu]);
114 }
115
/*
 * Begin recording: wipe the buffers first, then flip the enable flag
 * so the callbacks start writing into a clean trace.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}
121
/* Stop recording; buffers are left intact for reading. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}
126
127 static void sched_switch_trace_init(struct trace_array *tr)
128 {
129         ctx_trace = tr;
130
131         if (tr->ctrl)
132                 start_sched_trace(tr);
133 }
134
135 static void sched_switch_trace_reset(struct trace_array *tr)
136 {
137         if (tr->ctrl)
138                 stop_sched_trace(tr);
139 }
140
141 static void sched_switch_trace_ctrl_update(struct trace_array *tr)
142 {
143         /* When starting a new trace, reset the buffers */
144         if (tr->ctrl)
145                 start_sched_trace(tr);
146         else
147                 stop_sched_trace(tr);
148 }
149
/* Tracer descriptor registered with the ftrace core ("sched_switch"). */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
160
/* Register the tracer with the ftrace core at boot (device initcall). */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);