/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
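/*
 * Example usage (function name and paths are illustrative; paths assume
 * the usual tracefs mount point): each instance created under tracefs
 * gets its own ftrace_ops from the allocation above, so function
 * filtering is per instance:
 *
 *   # mkdir /sys/kernel/debug/tracing/instances/foo
 *   # echo function > /sys/kernel/debug/tracing/instances/foo/current_tracer
 *   # echo kmalloc > /sys/kernel/debug/tracing/instances/foo/set_ftrace_filter
 */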
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation. Unless it failed
	 * the allocation.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
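/*
 * Example usage (path assumes the usual tracefs mount point): the init
 * above runs when the function tracer is selected:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */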
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
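/*
 * Why the recursion bit above matters (illustrative): helpers called
 * from this callback may themselves be traced, and without the
 * trace_test_and_set_recursion()/trace_clear_recursion() pair a traced
 * helper would re-enter the callback until the stack overflowed:
 *
 *   some_traced_func()
 *     function_trace_call()
 *       traced helper used while recording
 *         function_trace_call()   <- rejected by the recursion bit
 */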
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
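/*
 * Note on the protection above: local_irq_save() keeps normal
 * interrupts out, and the per-cpu data->disabled counter catches what
 * it cannot (e.g. an NMI on this CPU whose path lands back in this
 * callback): a nested invocation sees atomic_inc_return() > 1 and
 * skips recording rather than recursing into the buffer.
 */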
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
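/*
 * Example usage: TRACER_OPT(func_stack_trace, ...) above becomes a
 * writable option file while the function tracer is active:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *
 * Best combined with set_ftrace_filter: recording a stack trace for
 * every traced function call is very expensive.
 */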
static void tracing_start_function_trace(struct trace_array *tr)
{
	/*
	 * The callback checks tr->function_enabled, so keep it clear
	 * until registration is complete; callbacks that fire in
	 * between record nothing.
	 */
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}
static struct tracer function_trace;
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip, bool on)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
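/*
 * Concrete case the barrier pairing above prevents (illustrative, two
 * CPUs hitting the same traceoff:N probe): CPU0 calls tracing_off()
 * and then stores N-1, separated by the smp_wmb(). If CPU1 loads the
 * new count N-1, the smp_rmb() after its load guarantees it also sees
 * tracing as already off and returns without touching the count. If
 * CPU1 raced and loaded the old count N, the worst it does is call
 * tracing_off() again and store the same N-1: a nop, never a double
 * decrement.
 */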
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct ftrace_probe_ops *ops, void **data)
{
	update_traceon_count(ops, ip, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_probe_ops *ops, void **data)
{
	update_traceon_count(ops, ip, 0);
}
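/*
 * Example usage of the counted probes above (function names are just
 * examples; any filterable function works):
 *
 *   # echo 'schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   # echo 'wake_up_process:traceon:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * The first turns the ring buffer off every time schedule() is hit;
 * the ':5' limits the second probe to acting five times, and per
 * update_traceon_count() a hit only consumes the count when it
 * actually flips the tracing state.
 */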
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct ftrace_probe_ops *ops, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct ftrace_probe_ops *ops, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}
/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4
static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct ftrace_probe_ops *ops, void **data)
{
	trace_dump_stack(STACK_SKIP);
}
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct ftrace_probe_ops *ops, void **data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_dump_stack(STACK_SKIP);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}
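/*
 * The cmpxchg() loop above keeps a 'stacktrace:N' count exact under
 * concurrency: only the CPU whose cmpxchg() moves the count from
 * old_count to old_count - 1 dumps a stack trace; a losing CPU
 * re-reads the count and retries, and everyone stops once the count
 * hits zero or tracing is switched off.
 */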
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}
static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct ftrace_probe_ops *ops, void **data)
{
	if (update_count(ops, ip))
		ftrace_dump(DUMP_ALL);
}
/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct ftrace_probe_ops *ops, void **data)
{
	if (update_count(ops, ip))
		ftrace_dump(DUMP_ORIG);
}
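/*
 * Example usage (the function name is arbitrary): both probes are
 * armed single-shot by their callbacks below, which pass "1" as the
 * count:
 *
 *   # echo 'schedule:dump' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   # echo 'schedule:cpudump' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * dump writes every CPU's ring buffer to the console on the first hit;
 * cpudump writes only the buffer of the CPU that hit the probe.
 */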
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops)
{
	struct ftrace_func_mapper *mapper = ops->private_data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops);
}
static int
ftrace_count_init(struct ftrace_probe_ops *ops, unsigned long ip,
		  void *data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;

	return ftrace_func_mapper_add_ip(mapper, ip, data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, unsigned long ip,
		  void **data)
{
	struct ftrace_func_mapper *mapper = ops->private_data;

	ftrace_func_mapper_remove_ip(mapper, ip);
}
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	if (!ops->private_data) {
		ops->private_data = allocate_ftrace_func_mapper();
		if (!ops->private_data)
			return -ENOMEM;
	}

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}
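/*
 * How a command string reaches the callback above (illustrative):
 * writing 'schedule:traceon:5' to set_ftrace_filter arrives as
 * glob = "schedule", cmd = "traceon", param = "5"; strsep() peels off
 * the count, which is stored in the (void *)count cookie, with
 * (void *)-1 meaning unlimited. A leading '!' ('!schedule:traceon')
 * unregisters the probe instead.
 */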
static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}
static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}