static DEFINE_PER_CPU(int, tracing_cpu);
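+/*
+ * Serializes check/update of the saved max-latency trace so that two
+ * CPUs finishing a critical section at the same time cannot clobber
+ * each other's new maximum.
+ */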
+static DEFINE_SPINLOCK(max_trace_lock);
+
enum {
TRACER_IRQS_OFF = (1 << 1),
TRACER_PREEMPT_OFF = (1 << 2),
static int trace_type __read_mostly;
#ifdef CONFIG_PREEMPT_TRACER
-static inline int notrace
+static inline int
preempt_trace(void)
{
return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
#endif
#ifdef CONFIG_IRQSOFF_TRACER
-static inline int notrace
+static inline int
irq_trace(void)
{
return ((trace_type & TRACER_IRQS_OFF) &&
/*
* irqsoff uses its own tracer function to keep the overhead down:
*/
-static void notrace
+static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
struct trace_array *tr = irqsoff_trace;
long disabled;
int cpu;
- if (likely(!__get_cpu_var(tracing_cpu)))
+ /*
+ * Does not matter if we preempt. We test the flags
+ * afterward, to see if irqs are disabled or not.
+ * If we preempt and get a false positive, the flags
+ * test will fail.
+ */
+ cpu = raw_smp_processor_id();
+ if (likely(!per_cpu(tracing_cpu, cpu)))
return;
local_save_flags(flags);
+ /* slight chance to get a false positive on tracing_cpu */
+ if (!irqs_disabled_flags(flags))
+ return;
- cpu = raw_smp_processor_id();
data = tr->data[cpu];
disabled = atomic_inc_return(&data->disabled);
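+ /* only trace when nothing else on this CPU is already using the buffer */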
if (likely(disabled == 1))
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
atomic_dec(&data->disabled);
}
/*
* Should this new latency be reported/recorded?
*/
-static int notrace report_latency(cycle_t delta)
+static int report_latency(cycle_t delta)
{
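+ /* a threshold means only latencies at or above it are reported */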
if (tracing_thresh) {
if (delta < tracing_thresh)
return 0;
}
-static void notrace
+static void
check_critical_timing(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long parent_ip,
int cpu)
{
unsigned long latency, t0, t1;
- cycle_t T0, T1, T2, delta;
+ cycle_t T0, T1, delta;
unsigned long flags;
/*
* as long as possible:
*/
T0 = data->preempt_timestamp;
- T1 = now(cpu);
+ T1 = ftrace_now(cpu);
delta = T1-T0;
local_save_flags(flags);
if (!report_latency(delta))
goto out;
- ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
- /*
- * Update the timestamp, because the trace entry above
- * might change it (it can only get larger so the latency
- * is fair to be reported):
- */
- T2 = now(cpu);
+ spin_lock_irqsave(&max_trace_lock, flags);
+
+ /* check if we are still the max latency */
+ if (!report_latency(delta))
+ goto out_unlock;
- delta = T2-T0;
+ trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
latency = nsecs_to_usecs(delta);
if (data->critical_sequence != max_sequence)
- goto out;
+ goto out_unlock;
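+ /* a new maximum: record it and snapshot the trace below */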
tracing_max_latency = delta;
t0 = nsecs_to_usecs(T0);
update_max_tr_single(tr, current, cpu);
- if (tracing_thresh)
- printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
- "violates %lu us threshold.\n"
- " => started at timestamp %lu: ",
- current->comm, current->pid,
- raw_smp_processor_id(),
- latency, nsecs_to_usecs(tracing_thresh), t0);
- else
- printk(KERN_INFO "(%16s-%-5d|#%d):"
- " new %lu us maximum-latency "
- "critical section.\n => started at timestamp %lu: ",
- current->comm, current->pid,
- raw_smp_processor_id(),
- latency, t0);
-
- print_symbol(KERN_CONT "<%s>\n", data->critical_start);
- printk(KERN_CONT " => ended at timestamp %lu: ", t1);
- print_symbol(KERN_CONT "<%s>\n", data->critical_end);
- dump_stack();
- t1 = nsecs_to_usecs(now(cpu));
- printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);
-
max_sequence++;
+out_unlock:
+ spin_unlock_irqrestore(&max_trace_lock, flags);
+
out:
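+ /* rearm the sequence and timestamp for the next critical section */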
data->critical_sequence = max_sequence;
- data->preempt_timestamp = now(cpu);
+ data->preempt_timestamp = ftrace_now(cpu);
tracing_reset(data);
- ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
+ trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}
-static inline void notrace
+static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
if (likely(!tracer_enabled))
return;
- if (__get_cpu_var(tracing_cpu))
+ cpu = raw_smp_processor_id();
+
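+ /* already timing a critical section here; keep the outermost one */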
+ if (per_cpu(tracing_cpu, cpu))
return;
- cpu = raw_smp_processor_id();
data = tr->data[cpu];
- if (unlikely(!data) || unlikely(!data->trace) ||
- atomic_read(&data->disabled))
+ if (unlikely(!data) || atomic_read(&data->disabled))
return;
atomic_inc(&data->disabled);
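+ /* note where and when this critical section began */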
data->critical_sequence = max_sequence;
- data->preempt_timestamp = now(cpu);
+ data->preempt_timestamp = ftrace_now(cpu);
data->critical_start = parent_ip ? : ip;
tracing_reset(data);
local_save_flags(flags);
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
- __get_cpu_var(tracing_cpu) = 1;
+ per_cpu(tracing_cpu, cpu) = 1;
atomic_dec(&data->disabled);
}
-static inline void notrace
+static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
int cpu;
struct trace_array_cpu *data;
unsigned long flags;
+ cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */
- if (unlikely(__get_cpu_var(tracing_cpu)))
- __get_cpu_var(tracing_cpu) = 0;
+ if (unlikely(per_cpu(tracing_cpu, cpu)))
+ per_cpu(tracing_cpu, cpu) = 0;
else
return;
if (!tracer_enabled)
return;
- cpu = raw_smp_processor_id();
data = tr->data[cpu];
- if (unlikely(!data) || unlikely(!data->trace) ||
+ if (unlikely(!data) || unlikely(!head_page(data)) ||
!data->critical_start || atomic_read(&data->disabled))
return;
atomic_inc(&data->disabled);
+
local_save_flags(flags);
- ftrace(tr, data, ip, parent_ip, flags);
+ trace_function(tr, data, ip, parent_ip, flags);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
}
/* start and stop critical timings used for stoppage (in idle) */
-void notrace start_critical_timings(void)
+void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-void notrace stop_critical_timings(void)
+void stop_critical_timings(void)
{
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
-void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
+void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
+void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
/*
* We are only interested in hardirq on/off events:
*/
-void notrace trace_hardirqs_on(void)
+void trace_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);
-void notrace trace_hardirqs_off(void)
+void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
-void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
+void trace_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
-void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
+void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
-void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
+void trace_preempt_on(unsigned long a0, unsigned long a1)
{
stop_critical_timing(a0, a1);
}
-void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
+void trace_preempt_off(unsigned long a0, unsigned long a1)
{
start_critical_timing(a0, a1);
}
static void start_irqsoff_tracer(struct trace_array *tr)
{
- tracer_enabled = 1;
register_ftrace_function(&trace_ops);
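+ /* enable only after the callback is registered */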
+ tracer_enabled = 1;
}
static void stop_irqsoff_tracer(struct trace_array *tr)
{
- unregister_ftrace_function(&trace_ops);
tracer_enabled = 0;
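+ /* disable before unregistering so no new trace sneaks in */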
+ unregister_ftrace_function(&trace_ops);
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
irqsoff_trace = tr;
- /* make sure that the tracer is visibel */
+ /* make sure that the tracer is visible */
smp_wmb();
if (tr->ctrl)
stop_irqsoff_tracer(tr);
}
-static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
+static void irqsoff_tracer_open(struct trace_iterator *iter)
{
/* stop the trace while dumping */
if (iter->tr->ctrl)
stop_irqsoff_tracer(iter->tr);
}
-static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
+static void irqsoff_tracer_close(struct trace_iterator *iter)
{
if (iter->tr->ctrl)
start_irqsoff_tracer(iter->tr);
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_irqsoff,
+#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptoff,
+#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
.close = irqsoff_tracer_close,
.ctrl_update = irqsoff_tracer_ctrl_update,
.print_max = 1,
+#ifdef CONFIG_FTRACE_SELFTEST
+ .selftest = trace_selftest_startup_preemptirqsoff,
+#endif
};
# define register_preemptirqsoff(trace) register_tracer(&trace)