From: Ingo Molnar
Date: Tue, 18 Nov 2008 07:52:13 +0000 (+0100)
Subject: Merge branches 'tracing/branch-tracer' and 'tracing/urgent' into tracing/core
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=5a209c2d58e70f9bc415b9cdf0e3b9aaefb70371;p=linux-beck.git

Merge branches 'tracing/branch-tracer' and 'tracing/urgent' into tracing/core
---

5a209c2d58e70f9bc415b9cdf0e3b9aaefb70371
diff --cc kernel/trace/trace.c
index 396fda034e3f,b04923b72ce0,697eda36b86a..2596b5a968c4
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@@@ -258,11 -258,9 -213,6 +258,9 @@@@ static const char *trace_options[] =
        "stacktrace",
        "sched-tree",
        "ftrace_printk",
+       "ftrace_preempt",
- #ifdef CONFIG_BRANCH_TRACER
+       "branch",
- #endif
+       "annotate",
        NULL
 };
@@@@ -1031,62 -1029,62 -884,11 +1029,62 @@@@ function_trace_call_preempt_only(unsign
        trace_function(tr, data, ip, parent_ip, flags, pc);
        atomic_dec(&data->disabled);
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
+}
+
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       if (unlikely(!ftrace_function_enabled))
+               return;
+
+       /*
+        * Need to use raw, since this must be called before the
+        * recursive protection is performed.
+        */
-       raw_local_irq_save(flags);
++      local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               trace_function(tr, data, ip, parent_ip, flags, pc);
+       }
+
+       atomic_dec(&data->disabled);
-       raw_local_irq_restore(flags);
++      local_irq_restore(flags);
+}
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+void trace_function_return(struct ftrace_retfunc *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       raw_local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_function_return(tr, data, trace, flags, pc);
+       }
+       atomic_dec(&data->disabled);
+       raw_local_irq_restore(flags);
 }
+#endif /* CONFIG_FUNCTION_RET_TRACER */

 static struct ftrace_ops trace_ops __read_mostly = {
@@@@ -2638,47 -2636,42 -2417,14 +2636,47 @@@@ static int tracing_set_tracer(char *buf
                current_trace->reset(tr);
        current_trace = t;
--      if (t->init)
--              t->init(tr);
++      if (t->init) {
++              ret = t->init(tr);
++              if (ret)
++                      goto out;
++      }
+
+       trace_branch_enable(tr);
 out:
        mutex_unlock(&trace_types_lock);
-       if (ret > 0)
-               filp->f_pos += ret;
+       return ret;
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       char buf[max_tracer_type_len+1];
+       int i;
+       size_t ret;
++      int err;
++
++      ret = cnt;
+
+       if (cnt > max_tracer_type_len)
+               cnt = max_tracer_type_len;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       /* strip ending whitespace. */
+       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+               buf[i] = 0;
+
-       ret = tracing_set_tracer(buf);
-       if (!ret)
-               ret = cnt;
++      err = tracing_set_tracer(buf);
++      if (err)
++              return err;
+
-       if (ret > 0)
-               filp->f_pos += ret;
++      filp->f_pos += ret;
        return ret;
 }
diff --cc kernel/trace/trace.h
index cdbd5cc22be8,b41d7b4c2cae,8465ad052707..37947f6b92bf
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@@@ -264,11 -264,10 -234,8 +264,11 @@@@ enum print_line_t
  */
 struct tracer {
        const char *name;
--      void (*init)(struct trace_array *tr);
++      /* Your tracer should raise a warning if init fails */
++      int (*init)(struct trace_array *tr);
        void (*reset)(struct trace_array *tr);
+       void (*start)(struct trace_array *tr);
+       void (*stop)(struct trace_array *tr);
        void (*open)(struct trace_iterator *iter);
        void (*pipe_open)(struct trace_iterator *iter);
        void (*close)(struct trace_iterator *iter);
@@@@ -470,92 -469,90 -415,8 +470,90 @@@@ enum trace_iterator_flags
        TRACE_ITER_STACKTRACE = 0x100,
        TRACE_ITER_SCHED_TREE = 0x200,
        TRACE_ITER_PRINTK = 0x400,
+       TRACE_ITER_PREEMPTONLY = 0x800,
- #ifdef CONFIG_BRANCH_TRACER
+       TRACE_ITER_BRANCH = 0x1000,
- #endif
+       TRACE_ITER_ANNOTATE = 0x2000,
 };
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
+#define TRACE_ITER_SYM_MASK \
+       (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
+
 extern struct tracer nop_trace;
+/**
+ * ftrace_preempt_disable - disable preemption scheduler safe
+ *
+ * When tracing can happen inside the scheduler, there exist
+ * cases where the tracing might happen before the need_resched
+ * flag is checked. If this happens and the tracer calls
+ * preempt_enable (after a disable), a schedule might take place
+ * causing an infinite recursion.
+ *
+ * To prevent this, we read the need_resched flag before
+ * disabling preemption. When we want to enable preemption we
+ * check the flag; if it is set, we call preempt_enable_no_resched.
+ * Otherwise, we call preempt_enable.
+ *
+ * The rationale for doing the above is that if need_resched is set
+ * and we have yet to reschedule, we are either in an atomic location
+ * (where we do not need to check for scheduling) or we are inside
+ * the scheduler and do not want to resched.
+ */
+static inline int ftrace_preempt_disable(void)
+{
+       int resched;
+
+       resched = need_resched();
+       preempt_disable_notrace();
+
+       return resched;
+}
+
+/**
+ * ftrace_preempt_enable - enable preemption scheduler safe
+ * @resched: the return value from ftrace_preempt_disable
+ *
+ * This is a scheduler safe way to enable preemption and not miss
+ * any preemption checks. The disable saved the state of preemption.
+ * If resched is set, then we were either inside an atomic or
+ * are inside the scheduler (we would have already scheduled
+ * otherwise). In this case, we do not want to call normal
+ * preempt_enable, but preempt_enable_no_resched instead.
+ */
+static inline void ftrace_preempt_enable(int resched)
+{
+       if (resched)
+               preempt_enable_no_resched_notrace();
+       else
+               preempt_enable_notrace();
+}
+
+#ifdef CONFIG_BRANCH_TRACER
+extern int enable_branch_tracing(struct trace_array *tr);
+extern void disable_branch_tracing(void);
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+       if (trace_flags & TRACE_ITER_BRANCH)
+               return enable_branch_tracing(tr);
+       return 0;
+}
+static inline void trace_branch_disable(void)
+{
+       /* due to races, always disable */
+       disable_branch_tracing();
+}
+#else
+static inline int trace_branch_enable(struct trace_array *tr)
+{
+       return 0;
+}
+static inline void trace_branch_disable(void)
+{
+}
+#endif /* CONFIG_BRANCH_TRACER */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
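
For reference, the ftrace_preempt_disable()/ftrace_preempt_enable() pair added in the trace.h hunk is meant to bracket tracing work that can run from inside the scheduler. The sketch below is not part of this merge: the callback name example_trace_call() and the example_record() helper are hypothetical, and the pattern simply mirrors function_trace_call_preempt_only() from the trace.c hunk above.

static void example_record(unsigned long ip, unsigned long parent_ip)
{
        /* hypothetical tracing work would go here */
}

static void example_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int resched;

        /* Save the need_resched state and disable preemption without tracing. */
        resched = ftrace_preempt_disable();

        example_record(ip, parent_ip);

        /*
         * If need_resched was already set, this expands to
         * preempt_enable_no_resched_notrace(), so the tracer never
         * recurses into the scheduler from its own enable path.
         */
        ftrace_preempt_enable(resched);
}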
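
Likewise, the struct tracer change makes init() return int, and tracing_set_tracer() now propagates that value instead of discarding it. A minimal sketch of a tracer following the new contract; the name "example", example_tracer_init() and example_allocate_state() are hypothetical and not part of this merge.

static int example_allocate_state(struct trace_array *tr)
{
        return 0;       /* hypothetical setup step; pretend it can fail */
}

static int example_tracer_init(struct trace_array *tr)
{
        int ret = example_allocate_state(tr);

        /* Per the new struct tracer comment, warn if init fails. */
        WARN_ON_ONCE(ret);

        /* A non-zero return makes tracing_set_tracer() bail out via its out: label. */
        return ret;
}

static struct tracer example_tracer __read_mostly = {
        .name   = "example",
        .init   = example_tracer_init,
};

After register_tracer(&example_tracer), writing "example" to the current_tracer file would hand example_tracer_init()'s error code back to user space via tracing_set_trace_write().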