/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
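
/*
 * Each trace_array instance gets its own ftrace_ops, so the filter
 * files created below act on that instance alone rather than on the
 * global function tracer.
 */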

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/* The top level array uses the "global_ops". */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) {
		ret = allocate_ftrace_ops(tr);
		if (ret)
			return ret;

		ftrace_create_filter_files(tr->ops, parent);
	}

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated
		 * at instance creation. Unless it failed
		 * the allocation.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
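
/*
 * Note: the callback above only disables preemption and relies on the
 * per-context recursion bits for protection; it never disables
 * interrupts, which keeps the common function-trace path cheap.
 */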

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
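
/*
 * Unlike function_trace_call() above, the stack variant disables
 * interrupts and uses data->disabled as its recursion guard: if the
 * callback fires again on this CPU (disabled > 1), the nested hit is
 * dropped instead of recursing through the stack unwinder.
 */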

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
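
/*
 * The stack-trace option is toggled at runtime through the tracefs
 * options directory, e.g. (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */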

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops = &trace_stack_ops;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops = &trace_ops;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}
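
/*
 * Note: the swap above is not atomic; between unregister and register
 * there is a short window where no callback is installed and function
 * entries are not recorded.
 */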

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
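
/*
 * The tracer is selected by the name registered above, e.g.:
 *
 *	# echo function > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 */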

#ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}
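
/*
 * The countdown lives in the probe's data pointer itself: -1 means
 * unlimited, any other value is decremented on each hit, and once it
 * hits zero update_count() returns 0 and the probe body is skipped.
 */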

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}
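
/*
 * DUMP_ORIG limits the dump to the CPU that triggered the probe,
 * while ftrace_dump_probe() above uses DUMP_ALL for every CPU.
 */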

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
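
/*
 * Probes registered below show up in set_ftrace_filter in this
 * format, e.g. "schedule:traceoff:count=3" or "kfree:stacktrace:unlimited".
 */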

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}
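
/*
 * Example (see Documentation/trace/ftrace.txt): stop the ring buffer
 * the first three times schedule() is hit, then remove the probe:
 *
 *	# echo 'schedule:traceoff:3' > set_ftrace_filter
 *	# echo '!schedule:traceoff' > set_ftrace_filter
 */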

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}
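
/*
 * Example: dump a stack trace on the first five calls to kfree():
 *
 *	# echo 'kfree:stacktrace:5' > set_ftrace_filter
 */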

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
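
/*
 * Both dump commands pass a hard-coded count of "1": dumping the ring
 * buffer is expensive and only the first hit is interesting, e.g.:
 *
 *	# echo 'schedule:dump' > set_ftrace_filter
 *
 * dumps once and never fires again.
 */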

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}

#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);