ftrace: add trace_function api for other tracers to use
author Steven Rostedt <srostedt@redhat.com>
Mon, 12 May 2008 19:20:49 +0000 (21:20 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 23 May 2008 18:55:55 +0000 (20:55 +0200)
A new check was added to the ftrace function that skips tracing if the
per-CPU trace buffer is disabled.  Unfortunately, other tracers used
ftrace() to write to the buffer after they had already disabled it, so
the new disable check turns those calls into a nop.

This patch renames __ftrace, the variant that is called without the
check, to a new API for the other tracers to use: trace_function().
The other tracers now call this interface when the per-CPU trace
buffer is already disabled.
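
For context, a minimal sketch of the calling convention this patch
establishes.  The callback name my_tracer_call() and the my_tr pointer
are hypothetical; trace_function() and the data->disabled protocol are
taken from this patch (compare function_trace_call() in the diff
below).

/*
 * Hypothetical tracer callback illustrating the pattern this patch
 * enables.  my_tracer_call() and my_tr are made-up names; the
 * trace_function() call and the disabled counter handling mirror
 * function_trace_call().
 */
static struct trace_array *my_tr;	/* assumed set up at tracer init */

notrace void my_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = my_tr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	/* Mark this CPU's buffer busy to guard against recursion. */
	disabled = atomic_inc_return(&data->disabled);

	/*
	 * ftrace() would now see data->disabled != 0 and do nothing;
	 * trace_function() writes the entry regardless.
	 */
	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}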

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c

index d041578affd0cda60106d04b945f9203ba582a66..9022c357032ac87db2e867ae51f2a1608db0f047 100644 (file)
@@ -641,8 +641,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 }
 
 notrace void
-__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-        unsigned long ip, unsigned long parent_ip, unsigned long flags)
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+              unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
        struct trace_entry *entry;
        unsigned long irq_flags;
@@ -664,7 +664,7 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        unsigned long ip, unsigned long parent_ip, unsigned long flags)
 {
        if (likely(!atomic_read(&data->disabled)))
-               __ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 }
 
 notrace void
@@ -730,7 +730,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               __ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
index 7bdfef35c05ab7455d4d8cdab173cd00980a267f..faf9f67246ac2bce72c5a7d12954fe3c2f6c1cf6 100644 (file)
@@ -169,6 +169,11 @@ void trace_special(struct trace_array *tr,
                   unsigned long arg1,
                   unsigned long arg2,
                   unsigned long arg3);
+void trace_function(struct trace_array *tr,
+                   struct trace_array_cpu *data,
+                   unsigned long ip,
+                   unsigned long parent_ip,
+                   unsigned long flags);
 
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
index d2a6e6f1ad2d95cccd1fd09e9dbb615ebd8f7ad9..3269f4ff5172fe8f47bdcdf82f0f87041d3526da 100644 (file)
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               ftrace(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags);
 
        atomic_dec(&data->disabled);
 }
@@ -150,7 +150,7 @@ check_critical_timing(struct trace_array *tr,
        if (!report_latency(delta))
                goto out_unlock;
 
-       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 
        latency = nsecs_to_usecs(delta);
 
@@ -188,7 +188,7 @@ out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(data);
-       ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+       trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
 }
 
 static inline void notrace
@@ -221,7 +221,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
        local_save_flags(flags);
 
-       ftrace(tr, data, ip, parent_ip, flags);
+       trace_function(tr, data, ip, parent_ip, flags);
 
        __get_cpu_var(tracing_cpu) = 1;
 
@@ -254,7 +254,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 
        atomic_inc(&data->disabled);
        local_save_flags(flags);
-       ftrace(tr, data, ip, parent_ip, flags);
+       trace_function(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
index b7df825c3af9e3739536810a3ca49abaa735ffbd..3549e4154f1f6675add2daef043c582f7040a87b 100644 (file)
@@ -85,7 +85,7 @@ wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
        if (unlikely(!tracer_enabled || next != wakeup_task))
                goto out_unlock;
 
-       ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
+       trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
 
        /*
         * usecs conversion is slow so we try to delay the conversion
@@ -192,7 +192,8 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
        local_save_flags(flags);
 
        tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
-       ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);
+       trace_function(tr, tr->data[wakeup_cpu],
+                      CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
        spin_unlock(&wakeup_lock);