update_max_tr_single(tr, current, cpu);
- if (!runqueue_is_locked()) {
- if (tracing_thresh) {
- printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical"
- " section violates %lu us threshold.\n",
- current->comm, current->pid,
- raw_smp_processor_id(),
- latency, nsecs_to_usecs(tracing_thresh));
- } else {
- printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us"
- " maximum-latency critical section.\n",
- current->comm, current->pid,
- raw_smp_processor_id(),
- latency);
- }
- }
-
max_sequence++;
out_unlock:
if (likely(!tracer_enabled))
return;
- if (__get_cpu_var(tracing_cpu))
+ cpu = raw_smp_processor_id();
+
+ if (per_cpu(tracing_cpu, cpu))
return;
- cpu = raw_smp_processor_id();
data = tr->data[cpu];
- if (unlikely(!data) || unlikely(!head_page(data)) ||
- atomic_read(&data->disabled))
+ if (unlikely(!data) || atomic_read(&data->disabled))
return;
atomic_inc(&data->disabled);
trace_function(tr, data, ip, parent_ip, flags);
- __get_cpu_var(tracing_cpu) = 1;
+ per_cpu(tracing_cpu, cpu) = 1;
atomic_dec(&data->disabled);
}
struct trace_array_cpu *data;
unsigned long flags;
+ cpu = raw_smp_processor_id();
/* Always clear the tracing cpu on stopping the trace */
- if (unlikely(__get_cpu_var(tracing_cpu)))
- __get_cpu_var(tracing_cpu) = 0;
+ if (unlikely(per_cpu(tracing_cpu, cpu)))
+ per_cpu(tracing_cpu, cpu) = 0;
else
return;
if (!tracer_enabled)
return;
- cpu = raw_smp_processor_id();
data = tr->data[cpu];
	if (unlikely(!data) || unlikely(!head_page(data)) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;
atomic_inc(&data->disabled);
+
local_save_flags(flags);
trace_function(tr, data, ip, parent_ip, flags);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
+EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
+EXPORT_SYMBOL_GPL(stop_critical_timings);
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- stop_critical_timing(a0, a1);
+ if (preempt_trace())
+ stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- start_critical_timing(a0, a1);
+ if (preempt_trace())
+ start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
static void __irqsoff_tracer_init(struct trace_array *tr)
{
irqsoff_trace = tr;
- /* make sure that the tracer is visibel */
+ /* make sure that the tracer is visible */
smp_wmb();
if (tr->ctrl)