/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include "trace.h"
#include "trace_output.h"
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data	*cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};
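/*
 * When writing an entry to the seq buffer fails, the event has already
 * been consumed from the ring buffer. The copies saved in ->ent/->ret
 * above let print_graph_function_flags() re-emit the missed entry on
 * the next read (see the data->failed handling below).
 */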
#define TRACE_GRAPH_INDENT	2

#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	{ } /* Empty entry */
};
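/*
 * These options can be flipped at runtime through the tracing debugfs
 * files while the tracer is active (illustrative, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo funcgraph-proc  > /sys/kernel/debug/tracing/trace_options
 *   echo nofuncgraph-cpu > /sys/kernel/debug/tracing/trace_options
 */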
static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION,
	.opts = trace_opts
};
static struct trace_array *graph_array;
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
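/*
 * Sketch of the usual arch contract (not enforced in this file): the
 * mcount entry hook - e.g. prepare_ftrace_return() on x86 - calls
 * ftrace_push_return_trace() and, on success, replaces the real return
 * address on the stack with the return_to_handler trampoline. The
 * original address is later recovered from ret_stack by
 * ftrace_return_to_handler() below.
 */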
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
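/*
 * The barrier() above keeps the compiler from reordering the
 * curr_ret_stack decrement before ftrace_graph_return() has finished
 * reporting the slot's contents; once the index drops, the slot is
 * free for reuse by a nested call.
 */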
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it if it is nested in a set function, or is itself a set function. */
	if (!(trace->depth || ftrace_graph_addr(trace->func)))
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}
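/*
 * With tracing_thresh set, nothing is recorded at call time (the entry
 * handler above just returns 1); only returns that took longer than
 * the threshold are emitted, via trace_graph_thresh_return() below.
 */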
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
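/*
 * Typical usage from userspace (illustrative; durations are made up
 * and paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 *
 *   1)               |  do_sys_open() {
 *   1)   1.837 us    |    getname();
 *   1) + 12.911 us   |  }
 */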
static int max_bytes_for_cpu;
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
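/*
 * Example: pid 1755 whose comm is "sshd" renders as "sshd-1755"; with
 * len = 9, spaces = 5, giving "  sshd-1755   " (2 + 9 + 3 = 14 columns).
 */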
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we
			 * can safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	/* If the duration is not displayed, we don't need anything */
	if (!(flags & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
				(unsigned long)t, usecs_rem);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* duration is in ns; split into usecs and the ns remainder */
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long)duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", usecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len),
			 "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
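/*
 * Worked example: duration = 1837 (ns). do_div() leaves duration = 1
 * and nsecs_rem = 837, so the column renders as "1.837 us   |  "
 * (padded to the fixed 7-character width before the separator).
 */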
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
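/*
 * The two entry renderings side by side (illustrative):
 *
 *   leaf - the matching return was found and folded in, so the
 *   duration is printed on the entry itself:
 *     1)   1.837 us    |    getname();
 *
 *   nested - children follow, so the duration appears later, on the
 *   closing brace printed by print_graph_return():
 *     1)               |  do_sys_open() {
 */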
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
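/*
 * Example: a trace_printk("hello") hit inside a traced function is
 * rendered one indent level deeper than its caller, wrapped in C-style
 * comment delimiters so it reads as an annotation between the braces.
 */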
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't print stack and function records as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
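/*
 * With the default flags (CPU, overhead, duration) the two header
 * lines come out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */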
void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		*pid = -1;
		*depth = 0;
		*ignore = 0;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);