if (!tracer_enabled)
return;
+ tracing_record_cmdline(prev);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
+ if (likely(disabled == 1))
tracing_sched_switch_trace(tr, data, prev, next, flags);
- ftrace_all_fair_tasks(__rq, tr, data);
- }
atomic_dec(&data->disabled);
local_irq_restore(flags);
if (!tracer_enabled)
return;
+ tracing_record_cmdline(curr);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
- if (likely(disabled == 1)) {
+ if (likely(disabled == 1))
tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
- ftrace_all_fair_tasks(__rq, tr, data);
- }
atomic_dec(&data->disabled);
local_irq_restore(flags);
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
struct task_struct *next)
{
- tracing_record_cmdline(prev);
-
/*
* If tracer_switch_func only points to the local
* switch func, it still needs the ptr passed to it.
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
struct task_struct *curr)
{
- tracing_record_cmdline(curr);
-
wakeup_func(__rq, wakee, curr);
/*
wakeup_sched_wakeup(wakee, curr);
}
+/*
+ * ftrace_special - record three caller-supplied values as a "special"
+ * entry in the trace buffer.
+ *
+ * Follows the same pattern as the sched-switch/wakeup probes in this
+ * file: return early when tracing is off, run with IRQs disabled, and
+ * use the per-cpu ->disabled counter as a recursion guard so only the
+ * first (non-nested) entry on this CPU actually writes a record.
+ */
+void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+ struct trace_array *tr = ctx_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+
+ if (!tracer_enabled)
+ return;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
+ /* Only trace if this is not a nested/recursive entry on this CPU. */
+ if (likely(disabled == 1))
+ __trace_special(tr, data, arg1, arg2, arg3);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
+}
+
static void sched_switch_reset(struct trace_array *tr)
{
int cpu;