From: Ingo Molnar Date: Mon, 17 Nov 2008 08:36:22 +0000 (+0100) Subject: Merge branches 'tracing/branch-tracer', 'tracing/ftrace', 'tracing/function-return... X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=3f8e402f34ecc7d1d00b54703d3baa401b8bdd78;p=linux-beck.git Merge branches 'tracing/branch-tracer', 'tracing/ftrace', 'tracing/function-return-tracer', 'tracing/tracepoints' and 'tracing/urgent' into tracing/core --- 3f8e402f34ecc7d1d00b54703d3baa401b8bdd78 diff --cc kernel/trace/ftrace.c index 54cb9a7d15e5,2f78a45aac14,54cb9a7d15e5,e60205722d0c..f212da486689 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@@@@ -47,9 -47,12 -47,9 -47,6 +47,12 @@@@@ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; +/* Quick disabling of function tracer. */ +int function_trace_stop; + + ++/* By default, current tracing type is normal tracing. */ + ++enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER; + ++ /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. @@@@@ -559,9 -555,8 -559,9 -533,9 +555,8 @@@@@ static void ftrace_startup(void return; mutex_lock(&ftrace_start_lock); - ftrace_start++; - if (ftrace_start == 1) - command |= FTRACE_ENABLE_CALLS; + ftrace_start_up++; - - if (ftrace_start_up == 1) - - command |= FTRACE_ENABLE_CALLS; + ++ command |= FTRACE_ENABLE_CALLS; if (saved_ftrace_func != ftrace_trace_function) { saved_ftrace_func = ftrace_trace_function; @@@@@ -1211,7 -1206,7 -1211,7 -1186,7 +1210,7 @@@@@ ftrace_regex_release(struct inode *inod mutex_lock(&ftrace_sysctl_lock); mutex_lock(&ftrace_start_lock); - - if (iter->filtered && ftrace_start_up && ftrace_enabled) - if (iter->filtered && ftrace_start && ftrace_enabled) + ++ if (ftrace_start_up && ftrace_enabled) ftrace_run_update_code(FTRACE_ENABLE_CALLS); mutex_unlock(&ftrace_start_lock); mutex_unlock(&ftrace_sysctl_lock); @@@@@ -1479,19 -1492,48 -1479,19 -1454,3 +1496,48 @@@@@ ftrace_enable_sysctl(struct ctl_table * return ret; } +#ifdef CONFIG_FUNCTION_RET_TRACER + ++ + ++/* The callback that hooks the return of a function */ +trace_function_return_t ftrace_function_return = + (trace_function_return_t)ftrace_stub; - - void register_ftrace_return(trace_function_return_t func) + ++ + ++int register_ftrace_return(trace_function_return_t func) +{ + ++ int ret = 0; + ++ + ++ mutex_lock(&ftrace_sysctl_lock); + ++ + ++ /* + ++ * Don't launch return tracing if normal function + ++ * tracing is already running. 
+ ++ */ + ++ if (ftrace_trace_function != ftrace_stub) { + ++ ret = -EBUSY; + ++ goto out; + ++ } + ++ + ++ ftrace_tracing_type = FTRACE_TYPE_RETURN; + ftrace_function_return = func; + ++ ftrace_startup(); + ++ + ++out: + ++ mutex_unlock(&ftrace_sysctl_lock); + ++ return ret; +} + +void unregister_ftrace_return(void) +{ + ++ mutex_lock(&ftrace_sysctl_lock); + ++ + ftrace_function_return = (trace_function_return_t)ftrace_stub; + ++ ftrace_shutdown(); + ++ /* Restore normal tracing type */ + ++ ftrace_tracing_type = FTRACE_TYPE_ENTER; + ++ + ++ mutex_unlock(&ftrace_sysctl_lock); +} +#endif + + + diff --cc kernel/trace/trace_branch.c index 2511e32572ca,44bd39539d61,85265553918f,000000000000..23f9b02ce967 mode 100644,100644,100644,000000..100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@@@@ -1,320 -1,321 -1,320 -1,0 +1,321 @@@@@ +/* + * unlikely profiler + * + * Copyright (C) 2008 Steven Rostedt + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +#ifdef CONFIG_BRANCH_TRACER + +static int branch_tracing_enabled __read_mostly; +static DEFINE_MUTEX(branch_tracing_mutex); +static struct trace_array *branch_tracer; + +static void +probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) +{ + struct trace_array *tr = branch_tracer; + struct ring_buffer_event *event; + struct trace_branch *entry; + unsigned long flags, irq_flags; + int cpu, pc; + const char *p; + + /* + * I would love to save just the ftrace_likely_data pointer, but + * this code can also be used by modules. Ugly things can happen + * if the module is unloaded, and then we go and read the + * pointer. This is slower, but much safer. + */ + + if (unlikely(!tr)) + return; + -- local_irq_save(flags); +++ raw_local_irq_save(flags); + cpu = raw_smp_processor_id(); + if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) + goto out; + + event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), + &irq_flags); + if (!event) + goto out; + + pc = preempt_count(); + entry = ring_buffer_event_data(event); + tracing_generic_entry_update(&entry->ent, flags, pc); + entry->ent.type = TRACE_BRANCH; + + /* Strip off the path, only save the file */ + p = f->file + strlen(f->file); + while (p >= f->file && *p != '/') + p--; + p++; + + strncpy(entry->func, f->func, TRACE_FUNC_SIZE); + strncpy(entry->file, p, TRACE_FILE_SIZE); + entry->func[TRACE_FUNC_SIZE] = 0; + entry->file[TRACE_FILE_SIZE] = 0; + entry->line = f->line; + entry->correct = val == expect; + + ring_buffer_unlock_commit(tr->buffer, event, irq_flags); + + out: + atomic_dec(&tr->data[cpu]->disabled); -- local_irq_restore(flags); +++ raw_local_irq_restore(flags); +} + +static inline +void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) +{ + if (!branch_tracing_enabled) + return; + + probe_likely_condition(f, val, expect); +} + +int enable_branch_tracing(struct trace_array *tr) +{ + int ret = 0; + + mutex_lock(&branch_tracing_mutex); + branch_tracer = tr; + /* + * Must be seen before enabling. 
The reader is a condition + * where we do not need a matching rmb() + */ + smp_wmb(); + branch_tracing_enabled++; + mutex_unlock(&branch_tracing_mutex); + + return ret; +} + +void disable_branch_tracing(void) +{ + mutex_lock(&branch_tracing_mutex); + + if (!branch_tracing_enabled) + goto out_unlock; + + branch_tracing_enabled--; + + out_unlock: + mutex_unlock(&branch_tracing_mutex); +} + +static void start_branch_trace(struct trace_array *tr) +{ + enable_branch_tracing(tr); +} + +static void stop_branch_trace(struct trace_array *tr) +{ + disable_branch_tracing(); +} + - - static void branch_trace_init(struct trace_array *tr) + ++static int branch_trace_init(struct trace_array *tr) +{ + int cpu; + + for_each_online_cpu(cpu) + tracing_reset(tr, cpu); + + start_branch_trace(tr); + ++ return 0; +} + +static void branch_trace_reset(struct trace_array *tr) +{ + stop_branch_trace(tr); +} + +struct tracer branch_trace __read_mostly = +{ + .name = "branch", + .init = branch_trace_init, + .reset = branch_trace_reset, +#ifdef CONFIG_FTRACE_SELFTEST + .selftest = trace_selftest_startup_branch, +#endif +}; + +__init static int init_branch_trace(void) +{ + return register_tracer(&branch_trace); +} + +device_initcall(init_branch_trace); +#else +static inline +void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) +{ +} +#endif /* CONFIG_BRANCH_TRACER */ + +void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) +{ + /* + * I would love to have a trace point here instead, but the + * trace point code is so inundated with unlikely and likely + * conditions that the recursive nightmare that exists is too + * much to try to get working. At least for now. + */ + trace_likely_condition(f, val, expect); + + /* FIXME: Make this atomic! */ + if (val == expect) + f->correct++; + else + f->incorrect++; +} +EXPORT_SYMBOL(ftrace_likely_update); + +struct ftrace_pointer { + void *start; + void *stop; +}; + +static void * +t_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct ftrace_pointer *f = m->private; + struct ftrace_branch_data *p = v; + + (*pos)++; + + if (v == (void *)1) + return f->start; + + ++p; + + if ((void *)p >= (void *)f->stop) + return NULL; + + return p; +} + +static void *t_start(struct seq_file *m, loff_t *pos) +{ + void *t = (void *)1; + loff_t l = 0; + + for (; t && l < *pos; t = t_next(m, t, &l)) + ; + + return t; +} + +static void t_stop(struct seq_file *m, void *p) +{ +} + +static int t_show(struct seq_file *m, void *v) +{ + struct ftrace_branch_data *p = v; + const char *f; + unsigned long percent; + + if (v == (void *)1) { + seq_printf(m, " correct incorrect %% " + " Function " + " File Line\n" + " ------- --------- - " + " -------- " + " ---- ----\n"); + return 0; + } + + /* Only print the file, not the path */ + f = p->file + strlen(p->file); + while (f >= p->file && *f != '/') + f--; + f++; + + if (p->correct) { + percent = p->incorrect * 100; + percent /= p->correct + p->incorrect; + } else + percent = p->incorrect ? 
100 : 0; + + seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent); + seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); + return 0; +} + +static struct seq_operations tracing_likely_seq_ops = { + .start = t_start, + .next = t_next, + .stop = t_stop, + .show = t_show, +}; + +static int tracing_likely_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, &tracing_likely_seq_ops); + if (!ret) { + struct seq_file *m = file->private_data; + m->private = (void *)inode->i_private; + } + + return ret; +} + +static struct file_operations tracing_likely_fops = { + .open = tracing_likely_open, + .read = seq_read, + .llseek = seq_lseek, +}; + +extern unsigned long __start_likely_profile[]; +extern unsigned long __stop_likely_profile[]; +extern unsigned long __start_unlikely_profile[]; +extern unsigned long __stop_unlikely_profile[]; + +static struct ftrace_pointer ftrace_likely_pos = { + .start = __start_likely_profile, + .stop = __stop_likely_profile, +}; + +static struct ftrace_pointer ftrace_unlikely_pos = { + .start = __start_unlikely_profile, + .stop = __stop_unlikely_profile, +}; + +static __init int ftrace_branch_init(void) +{ + struct dentry *d_tracer; + struct dentry *entry; + + d_tracer = tracing_init_dentry(); + + entry = debugfs_create_file("profile_likely", 0444, d_tracer, + &ftrace_likely_pos, + &tracing_likely_fops); + if (!entry) + pr_warning("Could not create debugfs 'profile_likely' entry\n"); + + entry = debugfs_create_file("profile_unlikely", 0444, d_tracer, + &ftrace_unlikely_pos, + &tracing_likely_fops); + if (!entry) + pr_warning("Could not create debugfs" + " 'profile_unlikely' entry\n"); + + return 0; +} + +device_initcall(ftrace_branch_init);
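
For context, the kernel/trace/ftrace.c hunks above add register_ftrace_return()/unregister_ftrace_return(), which install a single function-return callback under ftrace_sysctl_lock and refuse to coexist with normal function tracing (returning -EBUSY). The sketch below is illustrative only and is not part of this commit: the module name and the example_* identifiers are hypothetical, and the callback argument type (struct ftrace_retfunc and its fields) is assumed from this kernel series' <linux/ftrace.h> definition of trace_function_return_t and may differ in other trees.

/*
 * Illustrative sketch only -- not part of the commit above.
 * Shows how a hypothetical module could hook the new function-return
 * tracing API.  The struct ftrace_retfunc parameter is an assumption
 * based on this kernel series; check the actual trace_function_return_t
 * typedef before using.
 */
#include <linux/module.h>
#include <linux/ftrace.h>

static void example_return_hook(struct ftrace_retfunc *trace)
{
	/* e.g. inspect trace->func, trace->calltime, trace->rettime here */
}

static int __init example_return_init(void)
{
	/*
	 * register_ftrace_return() above returns -EBUSY if normal
	 * function tracing is already running.
	 */
	return register_ftrace_return(example_return_hook);
}

static void __exit example_return_exit(void)
{
	/* Restores FTRACE_TYPE_ENTER and shuts the tracer down. */
	unregister_ftrace_return();
}

module_init(example_return_init);
module_exit(example_return_exit);
MODULE_LICENSE("GPL");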