3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
/*
 * Print the left margin that aligns callchain graph output under the
 * histogram columns; returns the number of bytes written.
 * NOTE(review): lossy extraction — braces, the `int i;` declaration and
 * the final return are missing from this view, and the leading numbers
 * on each line are line-number residue. The base fprintf string here is
 * presumably a wider run of spaces that was collapsed — confirm against
 * the real file. Code left byte-identical.
 */
9 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
12 int ret = fprintf(fp, " ");
14 for (i = 0; i < left_margin; i++)
15 ret += fprintf(fp, " ");
/*
 * Print the inlined-function chain for the symbol at @ip in @map, one
 * line per inline frame, indented like the surrounding callchain graph
 * (the depth/depth_mask pipes mirror ipchain__fprintf_graph).
 * Returns bytes written. Skips kernel DSOs; frees the parsed inline
 * node before returning.
 * NOTE(review): lossy extraction — interior lines (braces, dso/ret/i
 * declarations, else arms, fprintf arguments, return) are missing and
 * each line carries a leading line-number residue. Code left
 * byte-identical.
 */
20 static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
21 int depth, int depth_mask, FILE *fp)
24 struct inline_node *node;
25 struct inline_list *ilist;
/* only user-space DSOs are resolved for inlines */
35 if (dso->kernel != DSO_TYPE_USER)
38 node = dso__parse_addr_inlines(dso,
39 map__rip_2objdump(map, ip));
43 list_for_each_entry(ilist, &node->val, list) {
44 if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
45 ret += callchain__fprintf_left_margin(fp, left_margin);
/* reproduce the '|' pipes of still-open callchain levels */
47 for (i = 0; i < depth; i++) {
48 if (depth_mask & (1 << i))
49 ret += fprintf(fp, "|");
51 ret += fprintf(fp, " ");
52 ret += fprintf(fp, " ");
/* address/srcline keys prefer file:line, else function name */
55 if (callchain_param.key == CCKEY_ADDRESS ||
56 callchain_param.key == CCKEY_SRCLINE) {
57 if (ilist->filename != NULL)
58 ret += fprintf(fp, "%s:%d (inline)",
62 ret += fprintf(fp, "??");
63 } else if (ilist->funcname != NULL)
64 ret += fprintf(fp, "%s (inline)",
66 else if (ilist->filename != NULL)
67 ret += fprintf(fp, "%s:%d (inline)",
71 ret += fprintf(fp, "??");
73 ret += fprintf(fp, "\n");
77 inline_node__delete(node);
/*
 * Print one separator line of the callchain graph: the left margin
 * followed by a '|' for every depth level still marked in @depth_mask,
 * then a newline. Returns bytes written.
 * NOTE(review): lossy extraction — the left_margin parameter line,
 * braces and return are missing; leading numbers are residue. Code left
 * byte-identical.
 */
81 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
85 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
87 for (i = 0; i < depth; i++)
88 if (depth_mask & (1 << i))
89 ret += fprintf(fp, "| ");
91 ret += fprintf(fp, " ");
93 ret += fprintf(fp, "\n");
/*
 * Print one callchain entry line of the graph: margin, level pipes,
 * then either "--<value>--" (when this entry opens a new branch, i.e.
 * !period at the last depth) or plain spacing, followed by the symbol
 * name, optionally suffixed with branch-flag counts. Returns bytes
 * written.
 * NOTE(review): lossy extraction — ret/i/str/buf declarations, several
 * closing braces, the free(alloc_str) path and the final fprintf of
 * @str are missing from this view; leading numbers are residue. Code
 * left byte-identical.
 */
98 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
99 struct callchain_list *chain,
100 int depth, int depth_mask, int period,
101 u64 total_samples, int left_margin)
105 char bf[1024], *alloc_str = NULL;
109 ret += callchain__fprintf_left_margin(fp, left_margin);
110 for (i = 0; i < depth; i++) {
111 if (depth_mask & (1 << i))
112 ret += fprintf(fp, "|");
114 ret += fprintf(fp, " ");
/* last level without a period: draw the --value-- branch opener */
115 if (!period && i == depth - 1) {
116 ret += fprintf(fp, "--");
117 ret += callchain_node__fprintf_value(node, fp, total_samples);
118 ret += fprintf(fp, "--");
120 ret += fprintf(fp, "%s", " ");
123 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
/* append branch-flag counters to the symbol name when requested */
125 if (symbol_conf.show_branchflag_count) {
127 callchain_list_counts__printf_value(node, chain, NULL,
130 callchain_list_counts__printf_value(NULL, chain, NULL,
133 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
134 str = "Not enough memory!";
/* follow the symbol with its inlined frames if enabled */
143 if (symbol_conf.inline_name)
144 ret += inline__fprintf(chain->ms.map, chain->ip,
145 left_margin, depth, depth_mask, fp);
/*
 * Fake symbol/callchain entry used to display the "[...]" remainder
 * line for filtered-out hits (set up by init_rem_hits() below).
 * NOTE(review): leading numbers are line-number residue from a lossy
 * extraction; code left byte-identical.
 */
149 static struct symbol *rem_sq_bracket;
150 static struct callchain_list rem_hits;
/*
 * Allocate the fake "[...]" symbol used for the remaining-hits line.
 * The +6 covers the name bytes "[...]" plus its NUL terminator stored
 * in the flexible name field of struct symbol. On allocation failure a
 * message is printed (the early return is not visible in this view).
 * NOTE(review): lossy extraction — braces, error-path return and the
 * namelen assignment are missing; leading numbers are residue. Code
 * left byte-identical.
 */
152 static void init_rem_hits(void)
154 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
155 if (!rem_sq_bracket) {
156 fprintf(stderr, "Not enough memory to display remaining hits\n");
160 strcpy(rem_sq_bracket->name, "[...]");
161 rem_hits.ms.sym = rem_sq_bracket;
/*
 * Recursively print one level of the callchain graph rooted at @root.
 * Walks the rbtree of children, printing each child's entries and
 * recursing with the depth mask extended by one level. In relative
 * mode (CHAIN_GRAPH_REL) any samples not attributed to a child are
 * printed at the end as a "[...]" remainder line via rem_hits.
 * Returns bytes written.
 * NOTE(review): lossy extraction — ret/cumul/remaining/new_total and
 * cumul_count declarations, loop head, several braces/else arms and
 * the trailing arguments of multi-line calls are missing; leading
 * numbers are residue. Code left byte-identical.
 */
164 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
165 u64 total_samples, int depth,
166 int depth_mask, int left_margin)
168 struct rb_node *node, *next;
169 struct callchain_node *child = NULL;
170 struct callchain_list *chain;
171 int new_depth_mask = depth_mask;
175 uint entries_printed = 0;
178 remaining = total_samples;
180 node = rb_first(root);
185 child = rb_entry(node, struct callchain_node, rb_node);
186 cumul = callchain_cumul_hits(child);
188 cumul_count += callchain_cumul_counts(child);
191 * The depth mask manages the output of pipes that show
192 * the depth. We don't want to keep the pipes of the current
193 * level for the last child of this depth.
194 * Except if we have remaining filtered hits. They will
195 * supersede the last child
197 next = rb_next(node);
198 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
199 new_depth_mask &= ~(1 << (depth - 1));
202 * But we keep the older depth mask for the line separator
203 * to keep the level link until we reach the last child
205 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
208 list_for_each_entry(chain, &child->val, list) {
209 ret += ipchain__fprintf_graph(fp, child, chain, depth,
/* relative mode scales children against this child's hits */
215 if (callchain_param.mode == CHAIN_GRAPH_REL)
216 new_total = child->children_hit;
218 new_total = total_samples;
220 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
222 new_depth_mask | (1 << depth),
225 if (++entries_printed == callchain_param.print_limit)
/* print the "[...]" remainder for hits filtered out at this level */
229 if (callchain_param.mode == CHAIN_GRAPH_REL &&
230 remaining && remaining != total_samples) {
231 struct callchain_node rem_node = {
238 if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
239 rem_node.count = child->parent->children_count - cumul_count;
240 if (rem_node.count <= 0)
244 new_depth_mask &= ~(1 << (depth - 1));
245 ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
246 new_depth_mask, 0, total_samples,
254 * If we have one single callchain root, don't bother printing
255 * its percentage (100 % in fractal mode and the same percentage
256 * as the hist in graph mode). This also avoids one level of column.
258 * However when a percent-limit is applied, it's possible that a single
259 * callchain node has a different (non-100% in fractal mode) percentage.
/*
 * Returns true when the single root's cumulated hits differ from the
 * parent entry's samples, i.e. its percentage is worth printing.
 * NOTE(review): lossy extraction — braces and the multi-root early
 * return are missing; leading numbers are residue. Code left
 * byte-identical.
 */
261 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
263 struct callchain_node *cnode;
268 cnode = rb_entry(node, struct callchain_node, rb_node);
269 return callchain_cumul_hits(cnode) != parent_samples;
/*
 * Top-level graph printer for one hist entry's sorted callchain.
 * When the single root needs no percentage line, its entries are
 * printed flat (prefixed with "|" / "---"), skipping the first entry
 * if it duplicates the sort-by-symbol column; then recursion continues
 * from that root's children. In relative mode percentages are computed
 * against @parent_samples. Returns bytes written.
 * NOTE(review): lossy extraction — left_margin parameter, ret/i/bf
 * declarations, several braces and call argument tails are missing;
 * leading numbers are residue. Code left byte-identical.
 */
272 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
273 u64 total_samples, u64 parent_samples,
276 struct callchain_node *cnode;
277 struct callchain_list *chain;
278 u32 entries_printed = 0;
279 bool printed = false;
280 struct rb_node *node;
285 node = rb_first(root);
286 if (node && !need_percent_display(node, parent_samples)) {
287 cnode = rb_entry(node, struct callchain_node, rb_node);
288 list_for_each_entry(chain, &cnode->val, list) {
290 * If we sort by symbol, the first entry is the same than
291 * the symbol. No need to print it otherwise it appears as
294 if (!i++ && field_order == NULL &&
295 sort_order && !prefixcmp(sort_order, "sym"))
/* first printed line opens the graph with "|" then "---" */
299 ret += callchain__fprintf_left_margin(fp, left_margin);
300 ret += fprintf(fp, "|\n");
301 ret += callchain__fprintf_left_margin(fp, left_margin);
302 ret += fprintf(fp, "---");
306 ret += callchain__fprintf_left_margin(fp, left_margin);
308 ret += fprintf(fp, "%s",
309 callchain_list__sym_name(chain, bf,
313 if (symbol_conf.show_branchflag_count)
314 ret += callchain_list_counts__printf_value(
315 NULL, chain, fp, NULL, 0);
316 ret += fprintf(fp, "\n");
318 if (++entries_printed == callchain_param.print_limit)
321 if (symbol_conf.inline_name)
322 ret += inline__fprintf(chain->ms.map,
/* continue the recursion from the collapsed root's children */
328 root = &cnode->rb_root;
331 if (callchain_param.mode == CHAIN_GRAPH_REL)
332 total_samples = parent_samples;
334 ret += __callchain__fprintf_graph(fp, root, total_samples,
337 /* do not add a blank line if it printed nothing */
338 ret += fprintf(fp, "\n");
/*
 * Print one flat callchain branch: recurse to the root first so frames
 * come out root-to-leaf, then print each symbol on its own indented
 * line, skipping PERF_CONTEXT_* marker "addresses". Returns bytes
 * written.
 * NOTE(review): lossy extraction — total_samples parameter line,
 * ret/bf declarations, braces and return are missing; leading numbers
 * are residue. Code left byte-identical.
 */
344 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
347 struct callchain_list *chain;
354 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
357 list_for_each_entry(chain, &node->val, list) {
358 if (chain->ip >= PERF_CONTEXT_MAX)
360 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
361 bf, sizeof(bf), false));
/*
 * Print every callchain branch in @tree in flat mode: for each rbtree
 * node print its value (hits/percentage) followed by the full
 * root-to-leaf symbol list, honoring callchain_param.print_limit.
 * Returns bytes written.
 * NOTE(review): lossy extraction — total_samples parameter line, ret
 * declaration, loop head, braces and return are missing; leading
 * numbers are residue. Code left byte-identical.
 */
367 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
371 u32 entries_printed = 0;
372 struct callchain_node *chain;
373 struct rb_node *rb_node = rb_first(tree);
376 chain = rb_entry(rb_node, struct callchain_node, rb_node);
378 ret += fprintf(fp, " ");
379 ret += callchain_node__fprintf_value(chain, fp, total_samples);
380 ret += fprintf(fp, "\n");
381 ret += __callchain__fprintf_flat(fp, chain, total_samples);
382 ret += fprintf(fp, "\n");
383 if (++entries_printed == callchain_param.print_limit)
386 rb_node = rb_next(rb_node);
/*
 * Print one folded callchain branch on a single line, frames joined by
 * the field separator (default ";"), root first via recursion,
 * skipping PERF_CONTEXT_* markers. Returns bytes written.
 * NOTE(review): lossy extraction — ret/bf/first declarations, braces
 * and return are missing; leading numbers are residue. Code left
 * byte-identical.
 */
392 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
394 const char *sep = symbol_conf.field_sep ?: ";";
395 struct callchain_list *chain;
403 ret += __callchain__fprintf_folded(fp, node->parent);
406 list_for_each_entry(chain, &node->val, list) {
407 if (chain->ip >= PERF_CONTEXT_MAX)
409 ret += fprintf(fp, "%s%s", first ? "" : sep,
410 callchain_list__sym_name(chain,
411 bf, sizeof(bf), false));
/*
 * Print every callchain branch in @tree folded: one "value sym;sym;…"
 * line per branch, honoring callchain_param.print_limit. Returns bytes
 * written.
 * NOTE(review): lossy extraction — total_samples parameter line, ret
 * declaration, loop head, braces and return are missing; leading
 * numbers are residue. Code left byte-identical.
 */
418 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
422 u32 entries_printed = 0;
423 struct callchain_node *chain;
424 struct rb_node *rb_node = rb_first(tree);
428 chain = rb_entry(rb_node, struct callchain_node, rb_node);
430 ret += callchain_node__fprintf_value(chain, fp, total_samples);
431 ret += fprintf(fp, " ");
432 ret += __callchain__fprintf_folded(fp, chain);
433 ret += fprintf(fp, "\n");
434 if (++entries_printed == callchain_param.print_limit)
437 rb_node = rb_next(rb_node);
/*
 * Dispatch the callchain of one hist entry to the printer matching
 * callchain_param.mode (graph rel/abs, flat, folded). Parent samples
 * come from the cumulated period when cumulate_callchain is set.
 * Note: the visible REL and ABS arms pass identical arguments — the
 * mode distinction is handled inside callchain__fprintf_graph via
 * callchain_param.mode. Returns bytes written.
 * NOTE(review): lossy extraction — FILE *fp parameter, CHAIN_FLAT /
 * CHAIN_FOLDED / CHAIN_NONE case labels, braces and the default return
 * are missing; leading numbers are residue. Code left byte-identical.
 */
443 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
444 u64 total_samples, int left_margin,
447 u64 parent_samples = he->stat.period;
449 if (symbol_conf.cumulate_callchain)
450 parent_samples = he->stat_acc->period;
452 switch (callchain_param.mode) {
453 case CHAIN_GRAPH_REL:
454 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
455 parent_samples, left_margin);
457 case CHAIN_GRAPH_ABS:
458 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
459 parent_samples, left_margin);
462 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
465 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
470 pr_err("Bad callchain mode\n");
/*
 * Format one hist entry into hpp->buf using the column formats in
 * @hpp_list: for each non-skipped format emit the separator (or a
 * space), then the colored or plain entry text, aligned. Returns the
 * number of bytes written into the buffer (hpp->buf is advanced as it
 * goes, so the return is end minus @start).
 * NOTE(review): lossy extraction — ret/first declarations, the early
 * return for excluded-other entries, braces and the first=false update
 * are missing; leading numbers are residue. Code left byte-identical.
 */
476 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
477 struct perf_hpp_list *hpp_list)
479 const char *sep = symbol_conf.field_sep;
480 struct perf_hpp_fmt *fmt;
481 char *start = hpp->buf;
485 if (symbol_conf.exclude_other && !he->parent)
488 perf_hpp_list__for_each_format(hpp_list, fmt) {
489 if (perf_hpp__should_skip(fmt, he->hists))
493 * If there's no field_sep, we still need
494 * to display initial ' '.
496 if (!sep || !first) {
497 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
498 advance_hpp(hpp, ret);
502 if (perf_hpp__use_color() && fmt->color)
503 ret = fmt->color(fmt, hpp, he);
505 ret = fmt->entry(fmt, hpp, he);
507 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
508 advance_hpp(hpp, ret);
511 return hpp->buf - start;
/*
 * Convenience wrapper: format @he with the hists' default column list.
 * NOTE(review): braces missing from this lossy view; leading numbers
 * are residue. Code left byte-identical.
 */
514 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
516 return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: indent by the
 * entry's depth, emit the overhead columns from the first hpp format
 * node, pad the remaining overhead width, then the entry's own sort
 * column (left-aligned, trimmed), a newline, and finally its callchain
 * when it is a leaf and callchains are enabled. Returns bytes printed.
 * NOTE(review): lossy extraction — struct hists */FILE *fp parameters,
 * `bool first = true`, first=false updates, buffer resets between the
 * two format loops, several braces and the final return are missing;
 * leading numbers are residue. Code left byte-identical.
 */
519 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
520 struct perf_hpp *hpp,
524 const char *sep = symbol_conf.field_sep;
525 struct perf_hpp_fmt *fmt;
526 struct perf_hpp_list_node *fmt_node;
527 char *buf = hpp->buf;
528 size_t size = hpp->size;
529 int ret, printed = 0;
532 if (symbol_conf.exclude_other && !he->parent)
535 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
536 advance_hpp(hpp, ret);
538 /* the first hpp_list_node is for overhead columns */
539 fmt_node = list_first_entry(&hists->hpp_formats,
540 struct perf_hpp_list_node, list);
541 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
543 * If there's no field_sep, we still need
544 * to display initial ' '.
546 if (!sep || !first) {
547 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
548 advance_hpp(hpp, ret);
552 if (perf_hpp__use_color() && fmt->color)
553 ret = fmt->color(fmt, hpp, he);
555 ret = fmt->entry(fmt, hpp, he);
557 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
558 advance_hpp(hpp, ret);
/* pad out the unused overhead columns of deeper levels */
562 ret = scnprintf(hpp->buf, hpp->size, "%*s",
563 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
564 advance_hpp(hpp, ret);
566 printed += fprintf(fp, "%s", buf);
568 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
573 * No need to call hist_entry__snprintf_alignment() since this
574 * fmt is always the last column in the hierarchy mode.
576 if (perf_hpp__use_color() && fmt->color)
577 fmt->color(fmt, hpp, he);
579 fmt->entry(fmt, hpp, he);
582 * dynamic entries are right-aligned but we want left-aligned
583 * in the hierarchy mode
585 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
587 printed += putc('\n', fp);
589 if (symbol_conf.use_callchain && he->leaf) {
590 u64 total = hists__total_period(hists);
592 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
/*
 * Print one hist entry to @fp: delegate to the hierarchy printer when
 * --hierarchy is active; otherwise format the row into @bf, print it,
 * then (when callchains are in use) print the callchain, falling back
 * to the entry's own inline frames when the callchain printed nothing
 * and inline names are enabled. Returns bytes printed.
 * NOTE(review): lossy extraction — use_callchain parameter, the hpp
 * initializer fields, ret/inline_ret declarations, the use_callchain
 * guard around the callchain call and the final return are missing;
 * leading numbers are residue. Code left byte-identical.
 */
600 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
601 char *bf, size_t bfsz, FILE *fp,
605 int callchain_ret = 0;
607 struct perf_hpp hpp = {
611 struct hists *hists = he->hists;
612 u64 total_period = hists->stats.total_period;
/* clamp the working size to the caller's buffer */
614 if (size == 0 || size > bfsz)
615 size = hpp.size = bfsz;
617 if (symbol_conf.report_hierarchy)
618 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
620 hist_entry__snprintf(he, &hpp);
622 ret = fprintf(fp, "%s\n", bf);
625 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
628 if (callchain_ret == 0 && symbol_conf.inline_name) {
629 inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
632 ret += fprintf(fp, "\n");
634 ret += callchain_ret;
/*
 * Print up to (indent - 2) * HIERARCHY_INDENT characters of @line as
 * hierarchy-mode indentation; a no-op when a field separator is in use
 * or the indent is too small. Returns bytes printed.
 * NOTE(review): braces and the early-return value (presumably 0) are
 * missing from this lossy view; leading numbers are residue. Code left
 * byte-identical.
 */
639 static int print_hierarchy_indent(const char *sep, int indent,
640 const char *line, FILE *fp)
642 if (sep != NULL || indent < 2)
645 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
/*
 * Print the column headers for hierarchy mode: overhead column headers
 * first, then the sort headers of each hpp format node combined with
 * " / " (and "+" between columns inside one node), followed by a line
 * of dots underlining the overhead columns and one sized to the widest
 * combined sort header. Returns via a final return not visible here.
 * NOTE(review): lossy extraction — FILE *fp handling of first_node /
 * first_col flags, depth tracking, width declarations, many braces and
 * the return are missing; `spaces` and `dots` are presumably
 * file-level padding string constants — confirm against the real file.
 * Leading numbers are residue. Code left byte-identical.
 */
648 static int hists__fprintf_hierarchy_headers(struct hists *hists,
649 struct perf_hpp *hpp, FILE *fp)
651 bool first_node, first_col;
655 unsigned header_width = 0;
656 struct perf_hpp_fmt *fmt;
657 struct perf_hpp_list_node *fmt_node;
658 const char *sep = symbol_conf.field_sep;
660 indent = hists->nr_hpp_node;
662 /* preserve max indent depth for column headers */
663 print_hierarchy_indent(sep, indent, spaces, fp);
665 /* the first hpp_list_node is for overhead columns */
666 fmt_node = list_first_entry(&hists->hpp_formats,
667 struct perf_hpp_list_node, list);
669 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
670 fmt->header(fmt, hpp, hists, 0, NULL);
671 fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
674 /* combine sort headers with ' / ' */
676 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
678 header_width += fprintf(fp, " / ");
682 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
683 if (perf_hpp__should_skip(fmt, hists))
687 header_width += fprintf(fp, "+");
690 fmt->header(fmt, hpp, hists, 0, NULL);
692 header_width += fprintf(fp, "%s", trim(hpp->buf));
698 /* preserve max indent depth for initial dots */
699 print_hierarchy_indent(sep, indent, dots, fp);
701 /* the first hpp_list_node is for overhead columns */
702 fmt_node = list_first_entry(&hists->hpp_formats,
703 struct perf_hpp_list_node, list);
706 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
708 fprintf(fp, "%s", sep ?: "..");
711 width = fmt->width(fmt, hpp, hists);
712 fprintf(fp, "%.*s", width, dots);
/* track the widest combined sort-header line for the underline */
716 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
718 width = depth * HIERARCHY_INDENT;
720 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
721 if (perf_hpp__should_skip(fmt, hists))
725 width++; /* for '+' sign between column header */
728 width += fmt->width(fmt, hpp, hists);
731 if (width > header_width)
732 header_width = width;
737 fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
739 fprintf(fp, "\n#\n");
/*
 * Print one header line (@line) of the standard (non-hierarchy)
 * column headers: for each non-skipped format emit the separator then
 * the format's header text for that line.
 * NOTE(review): lossy extraction — line/FILE *fp parameters, the
 * first-column flag, span handling and braces are missing; leading
 * numbers are residue. Code left byte-identical.
 */
744 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
747 struct perf_hpp_fmt *fmt;
748 const char *sep = symbol_conf.field_sep;
752 hists__for_each_format(hists, fmt) {
753 if (perf_hpp__should_skip(fmt, hists))
757 fprintf(fp, "%s", sep ?: " ");
761 fmt->header(fmt, hpp, hists, line, &span);
764 fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard column headers: every configured header line via
 * fprintf_line(), then (unless separators/suppression apply — that
 * branch is not visible here) a '#'-prefixed underline of dashes per
 * column. Returns the number of header rows printed.
 * NOTE(review): lossy extraction — the return type line, FILE *fp
 * parameter, line/width/i declarations, the newline/'#' prefix prints
 * and the dash-emitting statement inside the width loop are missing;
 * leading numbers are residue. Code left byte-identical.
 */
769 hists__fprintf_standard_headers(struct hists *hists,
770 struct perf_hpp *hpp,
773 struct perf_hpp_list *hpp_list = hists->hpp_list;
774 struct perf_hpp_fmt *fmt;
776 const char *sep = symbol_conf.field_sep;
780 for (line = 0; line < hpp_list->nr_header_lines; line++) {
781 /* first # is displayed one level up */
784 fprintf_line(hists, hpp, line, fp);
/* with a field separator there is no dashed underline */
789 return hpp_list->nr_header_lines;
795 hists__for_each_format(hists, fmt) {
798 if (perf_hpp__should_skip(fmt, hists))
802 fprintf(fp, "%s", sep ?: " ");
806 width = fmt->width(fmt, hpp, hists);
807 for (i = 0; i < width; i++)
/* header lines plus the dashed underline and blank line */
813 return hpp_list->nr_header_lines + 2;
/*
 * Print the column headers for @hists, dispatching to the hierarchy or
 * standard variant; returns the number of rows printed.
 * NOTE(review): lossy extraction — the dummy_hpp initializer fields
 * (buffer and size) and braces are missing; leading numbers are
 * residue. Code left byte-identical.
 */
816 int hists__fprintf_headers(struct hists *hists, FILE *fp)
819 struct perf_hpp dummy_hpp = {
826 if (symbol_conf.report_hierarchy)
827 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
829 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
/*
 * Top-level printer for a hists tree: optionally print headers, then
 * walk the (possibly hierarchical) entries, printing each entry that
 * passes the percent limit, emitting a "no entry >= x.xx%" placeholder
 * for hierarchy nodes whose children were all filtered, and dumping
 * the thread's maps for unresolved entries in verbose mode. Honors
 * @max_rows. Frees the line buffer and the "[...]"  symbol at the end.
 * Returns bytes printed (final return not visible here).
 * NOTE(review): lossy extraction — trailing parameters of the
 * signature, nr_rows/ret/line/linesz/indent/nd declarations, the
 * show_header guard, malloc failure check, filtered-entry skip,
 * free(line) and return are missing; leading numbers are residue.
 * Code left byte-identical.
 */
833 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
834 int max_cols, float min_pcnt, FILE *fp,
839 const char *sep = symbol_conf.field_sep;
847 hists__reset_column_width(hists);
849 if (symbol_conf.col_width_list_str)
850 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
853 nr_rows += hists__fprintf_headers(hists, fp);
855 if (max_rows && nr_rows >= max_rows)
/* room for the formatted row plus color escape overhead */
858 linesz = hists__sort_list_width(hists) + 3 + 1;
859 linesz += perf_hpp__color_overhead();
860 line = malloc(linesz);
866 indent = hists__overhead_width(hists) + 4;
868 for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
869 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
875 percent = hist_entry__get_percent_limit(h);
876 if (percent < min_pcnt)
879 ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
881 if (max_rows && ++nr_rows >= max_rows)
885 * If all children are filtered out or percent-limited,
886 * display "no entry >= x.xx%" message.
888 if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
889 int depth = hists->nr_hpp_node + h->depth + 1;
891 print_hierarchy_indent(sep, depth, spaces, fp);
892 fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
894 if (max_rows && ++nr_rows >= max_rows)
898 if (h->ms.map == NULL && verbose > 1) {
899 __map_groups__fprintf_maps(h->thread->mg,
901 fprintf(fp, "%.10s end\n", graph_dotted_line);
907 zfree(&rem_sq_bracket);
912 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
917 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
920 if (stats->nr_events[i] == 0)
923 name = perf_event__name(i);
924 if (!strcmp(name, "UNKNOWN"))
927 ret += fprintf(fp, "%16s events: %10d\n", name,
928 stats->nr_events[i]);