3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
/*
 * Emit the leading margin for a callchain line: one space followed by
 * 'left_margin' further spaces.  Returns the number of bytes written.
 * NOTE(review): interior lines (braces, 'int i;' declaration, final
 * return) are elided in this chunk; the leading numbers on each line
 * are paste artifacts, not code.
 */
9 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
12 int ret = fprintf(fp, " ");
14 for (i = 0; i < left_margin; i++)
15 ret += fprintf(fp, " ");
/*
 * Print the chain of inlined functions resolved for address 'ip' in
 * 'map', one "<name> (inline)" / "<file>:<line> (inline)" entry per
 * output line, indented under the callchain graph pipes described by
 * 'depth'/'depth_mask'.  Returns the number of bytes written.
 *
 * Skipped for kernel DSOs (the dso->kernel != DSO_TYPE_USER check --
 * presumably an early return; the branch body is elided here).  The
 * inline_node obtained from dso__parse_addr_inlines() is freed before
 * returning.
 */
20 static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
21 int depth, int depth_mask, FILE *fp)
24 struct inline_node *node;
25 struct inline_list *ilist;
/* only user-space DSOs have usable inline info here */
35 if (dso->kernel != DSO_TYPE_USER)
/* map ip to the objdump-style address the DSO parser expects */
38 node = dso__parse_addr_inlines(dso,
39 map__rip_2objdump(map, ip));
43 list_for_each_entry(ilist, &node->val, list) {
44 if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
45 ret += callchain__fprintf_left_margin(fp, left_margin);
/* reproduce the '|' pipes of active parent levels */
47 for (i = 0; i < depth; i++) {
48 if (depth_mask & (1 << i))
49 ret += fprintf(fp, "|");
51 ret += fprintf(fp, " ");
52 ret += fprintf(fp, " ");
/* address-keyed callchains prefer file:line, else fall back */
55 if (callchain_param.key == CCKEY_ADDRESS) {
56 if (ilist->filename != NULL)
57 ret += fprintf(fp, "%s:%d (inline)",
61 ret += fprintf(fp, "??");
62 } else if (ilist->funcname != NULL)
63 ret += fprintf(fp, "%s (inline)",
65 else if (ilist->filename != NULL)
66 ret += fprintf(fp, "%s:%d (inline)",
70 ret += fprintf(fp, "??");
72 ret += fprintf(fp, "\n");
/* release the parsed inline chain */
76 inline_node__delete(node);
/*
 * Print one vertical-connector line of the callchain graph: for each
 * depth level whose bit is set in 'depth_mask' emit "| ", otherwise
 * spaces, after the usual left margin.  Returns bytes written.
 */
80 static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
84 size_t ret = callchain__fprintf_left_margin(fp, left_margin);
86 for (i = 0; i < depth; i++)
87 if (depth_mask & (1 << i))
88 ret += fprintf(fp, "| ");
90 ret += fprintf(fp, " ");
92 ret += fprintf(fp, "\n");
/*
 * Print a single callchain entry line in graph mode: the depth pipes,
 * optionally the node's value ("--<value>--" on the first entry of a
 * branch, i.e. when 'period' is 0 and we are at the last depth level),
 * the symbol name, optional branch-flag counts, and optional inline
 * expansion.  Returns bytes written.
 */
97 static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
98 struct callchain_list *chain,
99 int depth, int depth_mask, int period,
100 u64 total_samples, int left_margin)
104 char bf[1024], *alloc_str = NULL;
108 ret += callchain__fprintf_left_margin(fp, left_margin);
109 for (i = 0; i < depth; i++) {
110 if (depth_mask & (1 << i))
111 ret += fprintf(fp, "|");
113 ret += fprintf(fp, " ");
/* branch point: annotate the new child with its value */
114 if (!period && i == depth - 1) {
115 ret += fprintf(fp, "--");
116 ret += callchain_node__fprintf_value(node, fp, total_samples);
117 ret += fprintf(fp, "--");
119 ret += fprintf(fp, "%s", " ");
122 str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
/*
 * Append branch-flag counts to the symbol name; NULL vs. node first
 * argument selects per-node vs. per-list counting (helper semantics
 * not visible here -- confirm against callchain.c).
 */
124 if (symbol_conf.show_branchflag_count) {
126 callchain_list_counts__printf_value(node, chain, NULL,
129 callchain_list_counts__printf_value(NULL, chain, NULL,
/* asprintf failure: fall back to a static message, don't crash */
132 if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
133 str = "Not enough memory!";
142 if (symbol_conf.inline_name)
143 ret += inline__fprintf(chain->ms.map, chain->ip,
144 left_margin, depth, depth_mask, fp);
/*
 * Fake "[...]" symbol and callchain entry used to display the hits
 * remaining after the percent filter in graph-relative mode.
 */
148 static struct symbol *rem_sq_bracket;
149 static struct callchain_list rem_hits;
/*
 * Allocate and initialize the "[...]" placeholder symbol used for
 * remaining (filtered-out) callchain hits.  The +6 covers the five
 * characters of "[...]" plus the NUL terminator in the symbol's
 * trailing name storage.  On allocation failure a message is printed
 * and the placeholder stays unset (early-return presumably elided).
 */
151 static void init_rem_hits(void)
153 rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
154 if (!rem_sq_bracket) {
155 fprintf(stderr, "Not enough memory to display remaining hits\n");
159 strcpy(rem_sq_bracket->name, "[...]");
160 rem_hits.ms.sym = rem_sq_bracket;
/*
 * Recursively print a callchain subtree in graph mode.  Walks the
 * rb-tree of children, printing each child's entries with
 * ipchain__fprintf_graph() and recursing into grandchildren with an
 * extended depth mask.  In graph-relative mode, samples not covered by
 * the printed children are shown as a trailing "[...]" entry.
 * Returns bytes written.
 */
163 static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
164 u64 total_samples, int depth,
165 int depth_mask, int left_margin)
167 struct rb_node *node, *next;
168 struct callchain_node *child = NULL;
169 struct callchain_list *chain;
170 int new_depth_mask = depth_mask;
174 uint entries_printed = 0;
/* remaining tracks samples not yet attributed to a printed child */
177 remaining = total_samples;
179 node = rb_first(root);
184 child = rb_entry(node, struct callchain_node, rb_node);
185 cumul = callchain_cumul_hits(child);
187 cumul_count += callchain_cumul_counts(child);
/*
 * The depth mask manages the output of pipes that show
 * the depth. We don't want to keep the pipes of the current
 * level for the last child of this depth.
 * Except if we have remaining filtered hits. They will
 * supersede the last child
 */
196 next = rb_next(node);
197 if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
198 new_depth_mask &= ~(1 << (depth - 1));
/*
 * But we keep the older depth mask for the line separator
 * to keep the level link until we reach the last child
 */
204 ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
207 list_for_each_entry(chain, &child->val, list) {
208 ret += ipchain__fprintf_graph(fp, child, chain, depth,
/* relative mode rescales children against this child's hits */
214 if (callchain_param.mode == CHAIN_GRAPH_REL)
215 new_total = child->children_hit;
217 new_total = total_samples;
219 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
221 new_depth_mask | (1 << depth),
224 if (++entries_printed == callchain_param.print_limit)
/* show what was filtered out as a synthetic "[...]" node */
228 if (callchain_param.mode == CHAIN_GRAPH_REL &&
229 remaining && remaining != total_samples) {
230 struct callchain_node rem_node = {
237 if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
238 rem_node.count = child->parent->children_count - cumul_count;
239 if (rem_node.count <= 0)
243 new_depth_mask &= ~(1 << (depth - 1));
244 ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
245 new_depth_mask, 0, total_samples,
/*
 * If we have a single callchain root, don't bother printing its
 * percentage (100% in fractal mode, and the same percentage as the
 * hist entry in graph mode).  This also avoids one level of column.
 *
 * However, when a percent-limit is applied, it's possible that a
 * single callchain node has a different (non-100% in fractal mode)
 * percentage, in which case it must still be displayed.
 */
260 static bool need_percent_display(struct rb_node *node, u64 parent_samples)
262 struct callchain_node *cnode;
267 cnode = rb_entry(node, struct callchain_node, rb_node);
268 return callchain_cumul_hits(cnode) != parent_samples;
/*
 * Top-level graph-mode callchain printer for one hist entry.  If the
 * single root node needs no percentage of its own, its entries are
 * printed inline ("---" prefix) and the recursion starts from its
 * children; otherwise the whole tree is handed to
 * __callchain__fprintf_graph().  Returns bytes written.
 */
271 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
272 u64 total_samples, u64 parent_samples,
275 struct callchain_node *cnode;
276 struct callchain_list *chain;
277 u32 entries_printed = 0;
278 bool printed = false;
279 struct rb_node *node;
284 node = rb_first(root);
285 if (node && !need_percent_display(node, parent_samples)) {
286 cnode = rb_entry(node, struct callchain_node, rb_node);
287 list_for_each_entry(chain, &cnode->val, list) {
/*
 * If we sort by symbol, the first entry is the same as
 * the symbol itself; no need to print it, otherwise it
 * appears duplicated.
 */
293 if (!i++ && field_order == NULL &&
294 sort_order && !prefixcmp(sort_order, "sym"))
/* first printed entry gets the "---" connector */
298 ret += callchain__fprintf_left_margin(fp, left_margin);
299 ret += fprintf(fp, "|\n");
300 ret += callchain__fprintf_left_margin(fp, left_margin);
301 ret += fprintf(fp, "---");
305 ret += callchain__fprintf_left_margin(fp, left_margin);
307 ret += fprintf(fp, "%s",
308 callchain_list__sym_name(chain, bf,
312 if (symbol_conf.show_branchflag_count)
313 ret += callchain_list_counts__printf_value(
314 NULL, chain, fp, NULL, 0);
315 ret += fprintf(fp, "\n");
317 if (++entries_printed == callchain_param.print_limit)
320 if (symbol_conf.inline_name)
321 ret += inline__fprintf(chain->ms.map,
/* descend past the collapsed root */
327 root = &cnode->rb_root;
330 if (callchain_param.mode == CHAIN_GRAPH_REL)
331 total_samples = parent_samples;
333 ret += __callchain__fprintf_graph(fp, root, total_samples,
336 /* do not add a blank line if it printed nothing */
337 ret += fprintf(fp, "\n");
/*
 * Print one flat-mode callchain, recursing to the parent first so the
 * chain is emitted root-to-leaf, one indented symbol per line.
 * Context marker pseudo-entries (ip >= PERF_CONTEXT_MAX) are skipped.
 * Returns bytes written.
 */
343 static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
346 struct callchain_list *chain;
353 ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
356 list_for_each_entry(chain, &node->val, list) {
357 if (chain->ip >= PERF_CONTEXT_MAX)
359 ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
360 bf, sizeof(bf), false));
/*
 * Print every callchain in 'tree' in flat mode: for each node, its
 * value (hits/percentage) followed by the full root-to-leaf chain,
 * stopping after callchain_param.print_limit entries.  Returns bytes
 * written.
 */
366 static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
370 u32 entries_printed = 0;
371 struct callchain_node *chain;
372 struct rb_node *rb_node = rb_first(tree);
375 chain = rb_entry(rb_node, struct callchain_node, rb_node);
377 ret += fprintf(fp, " ");
378 ret += callchain_node__fprintf_value(chain, fp, total_samples);
379 ret += fprintf(fp, "\n");
380 ret += __callchain__fprintf_flat(fp, chain, total_samples);
381 ret += fprintf(fp, "\n");
382 if (++entries_printed == callchain_param.print_limit)
385 rb_node = rb_next(rb_node);
/*
 * Print one folded-mode callchain on a single line, recursing to the
 * parent first, with entries joined by symbol_conf.field_sep (";" by
 * default).  Context marker pseudo-entries are skipped.  Returns
 * bytes written.
 */
391 static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
393 const char *sep = symbol_conf.field_sep ?: ";";
394 struct callchain_list *chain;
402 ret += __callchain__fprintf_folded(fp, node->parent);
405 list_for_each_entry(chain, &node->val, list) {
406 if (chain->ip >= PERF_CONTEXT_MAX)
408 ret += fprintf(fp, "%s%s", first ? "" : sep,
409 callchain_list__sym_name(chain,
410 bf, sizeof(bf), false));
/*
 * Print every callchain in 'tree' in folded mode: one line per chain,
 * value first, then the separator-joined chain, stopping after
 * callchain_param.print_limit entries.  Returns bytes written.
 */
417 static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
421 u32 entries_printed = 0;
422 struct callchain_node *chain;
423 struct rb_node *rb_node = rb_first(tree);
427 chain = rb_entry(rb_node, struct callchain_node, rb_node);
429 ret += callchain_node__fprintf_value(chain, fp, total_samples);
430 ret += fprintf(fp, " ");
431 ret += __callchain__fprintf_folded(fp, chain);
432 ret += fprintf(fp, "\n");
433 if (++entries_printed == callchain_param.print_limit)
436 rb_node = rb_next(rb_node);
/*
 * Dispatch callchain printing for one hist entry according to
 * callchain_param.mode (graph-rel, graph-abs, flat, folded).  With
 * cumulated callchains the accumulated period is used as the parent
 * sample count so percentages scale correctly.  Returns bytes
 * written; unknown modes log an error (fallthrough/default elided
 * here).
 */
442 static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
443 u64 total_samples, int left_margin,
446 u64 parent_samples = he->stat.period;
448 if (symbol_conf.cumulate_callchain)
449 parent_samples = he->stat_acc->period;
451 switch (callchain_param.mode) {
452 case CHAIN_GRAPH_REL:
453 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
454 parent_samples, left_margin);
456 case CHAIN_GRAPH_ABS:
457 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
458 parent_samples, left_margin);
461 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
464 return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
469 pr_err("Bad callchain mode\n");
/*
 * Format one hist entry into hpp->buf using the column formats in
 * 'hpp_list': for each non-skipped format, emit the field separator
 * (or a space), then the colored or plain field value, aligned.
 * Entries excluded by exclude_other are skipped (early return
 * presumably elided).  Returns the number of bytes written into the
 * buffer.
 */
475 int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
476 struct perf_hpp_list *hpp_list)
478 const char *sep = symbol_conf.field_sep;
479 struct perf_hpp_fmt *fmt;
480 char *start = hpp->buf;
484 if (symbol_conf.exclude_other && !he->parent)
487 perf_hpp_list__for_each_format(hpp_list, fmt) {
488 if (perf_hpp__should_skip(fmt, he->hists))
/*
 * If there's no field_sep, we still need
 * to display initial ' '.
 */
495 if (!sep || !first) {
496 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
497 advance_hpp(hpp, ret);
501 if (perf_hpp__use_color() && fmt->color)
502 ret = fmt->color(fmt, hpp, he);
504 ret = fmt->entry(fmt, hpp, he);
506 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
507 advance_hpp(hpp, ret);
510 return hpp->buf - start;
/* Convenience wrapper: format 'he' using its hists' default format list. */
513 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
515 return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
/*
 * Print one hist entry in hierarchy mode: indent by the entry's depth,
 * emit the overhead columns (first hpp_list_node), pad out to the
 * maximum hierarchy width, then emit the entry's own sort column
 * left-aligned, and finally the callchain for leaf entries when
 * callchains are enabled.  Returns bytes printed to 'fp'.
 */
518 static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
519 struct perf_hpp *hpp,
523 const char *sep = symbol_conf.field_sep;
524 struct perf_hpp_fmt *fmt;
525 struct perf_hpp_list_node *fmt_node;
526 char *buf = hpp->buf;
527 size_t size = hpp->size;
528 int ret, printed = 0;
531 if (symbol_conf.exclude_other && !he->parent)
/* indentation proportional to depth in the hierarchy */
534 ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
535 advance_hpp(hpp, ret);
537 /* the first hpp_list_node is for overhead columns */
538 fmt_node = list_first_entry(&hists->hpp_formats,
539 struct perf_hpp_list_node, list);
540 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
/*
 * If there's no field_sep, we still need
 * to display initial ' '.
 */
545 if (!sep || !first) {
546 ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
547 advance_hpp(hpp, ret);
551 if (perf_hpp__use_color() && fmt->color)
552 ret = fmt->color(fmt, hpp, he);
554 ret = fmt->entry(fmt, hpp, he);
556 ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
557 advance_hpp(hpp, ret);
/* pad to the widest hierarchy level so sort columns line up */
561 ret = scnprintf(hpp->buf, hpp->size, "%*s",
562 (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
563 advance_hpp(hpp, ret);
565 printed += fprintf(fp, "%s", buf);
567 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
/*
 * No need to call hist_entry__snprintf_alignment() since this
 * fmt is always the last column in the hierarchy mode.
 */
575 if (perf_hpp__use_color() && fmt->color)
576 fmt->color(fmt, hpp, he);
578 fmt->entry(fmt, hpp, he);
/*
 * dynamic entries are right-aligned but we want left-aligned
 * in the hierarchy mode
 */
584 printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
586 printed += putc('\n', fp);
588 if (symbol_conf.use_callchain && he->leaf) {
589 u64 total = hists__total_period(hists);
591 printed += hist_entry_callchain__fprintf(he, total, 0, fp);
/*
 * Print one hist entry line to 'fp': delegate to the hierarchy printer
 * when report_hierarchy is set, otherwise snprintf the columns into
 * 'bf' and print them, followed by the callchain and, if no callchain
 * was printed, the inline expansion of the entry's own symbol.
 * Returns bytes printed.
 */
599 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
600 char *bf, size_t bfsz, FILE *fp,
604 int callchain_ret = 0;
606 struct perf_hpp hpp = {
610 struct hists *hists = he->hists;
611 u64 total_period = hists->stats.total_period;
/* clamp the formatting size to the actual buffer */
613 if (size == 0 || size > bfsz)
614 size = hpp.size = bfsz;
616 if (symbol_conf.report_hierarchy)
617 return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
619 hist_entry__snprintf(he, &hpp);
621 ret = fprintf(fp, "%s\n", bf);
624 callchain_ret = hist_entry_callchain__fprintf(he, total_period,
/* no callchain printed: still show inline frames for the entry */
627 if (callchain_ret == 0 && symbol_conf.inline_name) {
628 inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
631 ret += fprintf(fp, "\n");
633 ret += callchain_ret;
/*
 * Print 'indent - 2' repetitions of HIERARCHY_INDENT characters taken
 * from 'line' (spaces or dots).  No-op when a field separator is in
 * use or the indent is too small.  Returns bytes printed (the no-op
 * return value is elided in this chunk -- presumably 0).
 */
638 static int print_hierarchy_indent(const char *sep, int indent,
639 const char *line, FILE *fp)
641 if (sep != NULL || indent < 2)
644 return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
/*
 * Print the two header lines for hierarchy-mode output: first the
 * column titles (overhead columns, then the sort keys of each level
 * joined with " / " and "+"), then a line of dots underlining the
 * columns, sized to the widest level.  Returns the number of header
 * lines printed (return statement elided in this chunk).
 */
647 static int hists__fprintf_hierarchy_headers(struct hists *hists,
648 struct perf_hpp *hpp, FILE *fp)
650 bool first_node, first_col;
654 unsigned header_width = 0;
655 struct perf_hpp_fmt *fmt;
656 struct perf_hpp_list_node *fmt_node;
657 const char *sep = symbol_conf.field_sep;
659 indent = hists->nr_hpp_node;
661 /* preserve max indent depth for column headers */
662 print_hierarchy_indent(sep, indent, spaces, fp);
664 /* the first hpp_list_node is for overhead columns */
665 fmt_node = list_first_entry(&hists->hpp_formats,
666 struct perf_hpp_list_node, list);
668 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
669 fmt->header(fmt, hpp, hists, 0, NULL);
670 fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
673 /* combine sort headers with ' / ' */
675 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
677 header_width += fprintf(fp, " / ");
681 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
682 if (perf_hpp__should_skip(fmt, hists))
/* '+' joins multiple sort keys within one hierarchy level */
686 header_width += fprintf(fp, "+");
689 fmt->header(fmt, hpp, hists, 0, NULL);
691 header_width += fprintf(fp, "%s", trim(hpp->buf));
697 /* preserve max indent depth for initial dots */
698 print_hierarchy_indent(sep, indent, dots, fp);
700 /* the first hpp_list_node is for overhead columns */
701 fmt_node = list_first_entry(&hists->hpp_formats,
702 struct perf_hpp_list_node, list);
705 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
707 fprintf(fp, "%s", sep ?: "..");
710 width = fmt->width(fmt, hpp, hists);
711 fprintf(fp, "%.*s", width, dots);
/* find the widest level so the underline covers every column */
715 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
717 width = depth * HIERARCHY_INDENT;
719 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
720 if (perf_hpp__should_skip(fmt, hists))
724 width++; /* for '+' sign between column header */
727 width += fmt->width(fmt, hpp, hists);
730 if (width > header_width)
731 header_width = width;
736 fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);
738 fprintf(fp, "\n#\n");
/*
 * Print one header line ('line' indexes multi-line headers) by
 * emitting each non-skipped format's header, separated by field_sep
 * or a space.
 */
743 static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
746 struct perf_hpp_fmt *fmt;
747 const char *sep = symbol_conf.field_sep;
751 hists__for_each_format(hists, fmt) {
752 if (perf_hpp__should_skip(fmt, hists))
756 fprintf(fp, "%s", sep ?: " ");
/* 'span' lets a header cell span columns -- semantics in fmt->header */
760 fmt->header(fmt, hpp, hists, line, &span);
763 fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard (non-hierarchy) headers: one line per configured
 * header row, then -- unless a field separator is in use -- a line of
 * dots underlining each column.  Returns the number of header lines
 * printed (nr_header_lines, plus 2 for the underline and blank line).
 */
768 hists__fprintf_standard_headers(struct hists *hists,
769 struct perf_hpp *hpp,
772 struct perf_hpp_list *hpp_list = hists->hpp_list;
773 struct perf_hpp_fmt *fmt;
775 const char *sep = symbol_conf.field_sep;
779 for (line = 0; line < hpp_list->nr_header_lines; line++) {
780 /* first # is displayed one level up */
783 fprintf_line(hists, hpp, line, fp);
/* with an explicit separator there is no dotted underline */
788 return hpp_list->nr_header_lines;
794 hists__for_each_format(hists, fmt) {
797 if (perf_hpp__should_skip(fmt, hists))
801 fprintf(fp, "%s", sep ?: " ");
/* underline each column with '.' to its full width */
805 width = fmt->width(fmt, hpp, hists);
806 for (i = 0; i < width; i++)
812 return hpp_list->nr_header_lines + 2;
/*
 * Print column headers for 'hists', dispatching to the hierarchy or
 * standard variant.  Returns the number of header lines printed.
 */
815 int hists__fprintf_headers(struct hists *hists, FILE *fp)
818 struct perf_hpp dummy_hpp = {
825 if (symbol_conf.report_hierarchy)
826 return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
828 return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
/*
 * Print the whole hists report: optional headers, then every entry
 * above 'min_pcnt' (walking the hierarchy with __rb_hierarchy_next),
 * honoring 'max_rows'.  In hierarchy mode, levels whose children are
 * all filtered out get a "no entry >= x.xx%" placeholder.  With
 * verbose > 1, entries lacking a map dump the thread's map groups.
 * Frees the per-line buffer and the "[...]" placeholder symbol before
 * returning bytes printed.
 */
832 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
833 int max_cols, float min_pcnt, FILE *fp,
838 const char *sep = symbol_conf.field_sep;
846 hists__reset_column_width(hists);
848 if (symbol_conf.col_width_list_str)
849 perf_hpp__set_user_width(symbol_conf.col_width_list_str);
852 nr_rows += hists__fprintf_headers(hists, fp);
854 if (max_rows && nr_rows >= max_rows)
/* +3 +1: room for " | " style padding and the NUL -- TODO confirm */
857 linesz = hists__sort_list_width(hists) + 3 + 1;
858 linesz += perf_hpp__color_overhead();
859 line = malloc(linesz);
865 indent = hists__overhead_width(hists) + 4;
867 for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
868 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
874 percent = hist_entry__get_percent_limit(h);
875 if (percent < min_pcnt)
878 ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
880 if (max_rows && ++nr_rows >= max_rows)
/*
 * If all children are filtered out or percent-limited,
 * display "no entry >= x.xx%" message.
 */
887 if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
888 int depth = hists->nr_hpp_node + h->depth + 1;
890 print_hierarchy_indent(sep, depth, spaces, fp);
891 fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
893 if (max_rows && ++nr_rows >= max_rows)
/* debugging aid: show where the unresolved address lives */
897 if (h->ms.map == NULL && verbose > 1) {
898 __map_groups__fprintf_maps(h->thread->mg,
900 fprintf(fp, "%.10s end\n", graph_dotted_line);
906 zfree(&rem_sq_bracket);
911 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
916 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
919 if (stats->nr_events[i] == 0)
922 name = perf_event__name(i);
923 if (!strcmp(name, "UNKNOWN"))
926 ret += fprintf(fp, "%16s events: %10d\n", name,
927 stats->nr_events[i]);