3 #include "../../util/util.h"
4 #include "../../util/hist.h"
5 #include "../../util/sort.h"
6 #include "../../util/evsel.h"
/*
 * Emit the leading whitespace before a callchain graph line: a fixed
 * space string, then 'left_margin' additional single spaces.
 * Returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");
/*
 * Print one vertical-connector line of the callchain graph: the left
 * margin, then a '|' for every depth level whose bit is still set in
 * 'depth_mask', then a newline.  Returns the characters printed.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	/* bit i of depth_mask == level i still has a live branch below it */
	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");

	ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");
/*
 * Print a single callchain entry line in graph mode: the '|' connectors
 * for the active depth levels, a "--value--" branch opener for the first
 * entry of a new branch (period == 0 at the deepest level), then the
 * resolved symbol name for 'chain'.  When branch flag counts are enabled
 * the counts string is appended to the symbol name.
 * Returns the characters printed.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
	char bf[1024], *alloc_str = NULL;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		ret += fprintf(fp, " ");
		/* first line of a new branch: open it with "--<value>--" */
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		ret += fprintf(fp, "%s", " ");

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		/* node != NULL selects per-node counts; NULL selects per-list */
		callchain_list_counts__printf_value(node, chain, NULL,
		callchain_list_counts__printf_value(NULL, chain, NULL,

		/* append counts to the symbol name; fall back on alloc failure */
		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
/* Placeholder "[...]" symbol representing remaining/filtered-out hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

/*
 * Allocate and initialize the "[...]" placeholder symbol once.  The
 * extra 6 bytes appear to provide storage for the "[...]" string plus
 * its NUL in the symbol's trailing name array — TODO confirm against
 * struct symbol's definition.  On allocation failure it only warns;
 * callers must cope with rem_sq_bracket == NULL.
 */
static void init_rem_hits(void)
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
/*
 * Recursively print a callchain subtree in graph mode.  Walks the
 * rb-tree of children at this depth, printing each child's entries and
 * recursing into its own children; 'depth_mask' tracks which ancestor
 * levels still need their '|' connector drawn.  In relative (fractal)
 * mode, hits not covered by any printed child are summarized as a
 * trailing "[...]" (rem_hits) entry.  Returns the characters printed.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	uint entries_printed = 0;

	/* 'remaining' counts hits not yet attributed to a printed child */
	remaining = total_samples;

	node = rb_first(root);
		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,

		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,

		/* REL mode: percentages are relative to this child's subtree */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
				      new_depth_mask | (1 << depth),

		if (++entries_printed == callchain_param.print_limit)

	/* summarize hits not shown above as a "[...]" placeholder entry */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {
		struct callchain_node rem_node = {

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
/*
 * If have one single callchain root, don't bother printing
 * its percentage (100 % in fractal mode and the same percentage
 * than the hist in graph mode). This also avoid one level of column.
 *
 * However when percent-limit applied, it's possible that single callchain
 * node have different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
	struct callchain_node *cnode;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	/* show the percentage only when it differs from the parent's */
	return callchain_cumul_hits(cnode) != parent_samples;
/*
 * Top-level graph-mode callchain printer for one hist entry.  When the
 * root has a single child whose cumulated hits equal the parent's (see
 * need_percent_display()), that chain is printed inline without a
 * percentage column and the recursion starts from its subtree instead.
 * Returns the characters printed.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && !prefixcmp(sort_order, "sym"))

				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");

				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)

		/* skip the collapsed single root; recurse from its children */
		root = &cnode->rb_root;

	/* REL mode: percentages are relative to this entry, not the session */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,

	/* do not add a blank line if it printed nothing */
	ret += fprintf(fp, "\n");
/*
 * Print one chain in flat mode, parent first (recurses up to the root
 * before emitting this node's entries).  Context markers (ip values at
 * or above PERF_CONTEXT_MAX) are skipped.  Returns characters printed.
 */
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
	struct callchain_list *chain;

	/* ancestors first so output reads root -> leaf */
	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
/*
 * Print all callchains of one hist entry in flat mode: for each
 * top-level chain, print its value followed by the full root-to-leaf
 * symbol list, honoring callchain_param.print_limit.
 * Returns the characters printed.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, " ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)

		rb_node = rb_next(rb_node);
/*
 * Print one chain in folded mode: the whole root-to-leaf symbol list on
 * a single line, joined by field_sep (default ";").  Recurses to the
 * parent first; context markers (ip >= PERF_CONTEXT_MAX) are skipped.
 * Returns the characters printed.
 */
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;

	/* ancestors first so the folded line reads root -> leaf */
	ret += __callchain__fprintf_folded(fp, node->parent);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
/*
 * Print all callchains of one hist entry in folded mode: one line per
 * chain, "<value> <sym;sym;...>", honoring callchain_param.print_limit.
 * Returns the characters printed.
 */
static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)

		rb_node = rb_next(rb_node);
/*
 * Dispatch callchain printing for one hist entry according to
 * callchain_param.mode (graph rel/abs, flat, folded).  With cumulated
 * callchains the accumulated period is used as the parent sample count.
 * Returns the characters printed; logs an error on an unknown mode.
 */
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);

	case CHAIN_GRAPH_ABS:
		/* same printer; REL vs ABS is distinguished inside it */
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);

		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);

		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);

		pr_err("Bad callchain mode\n");
/*
 * Format one hist entry into hpp->buf by running every format in
 * 'hpp_list' over it, separated by field_sep (or a leading space when
 * no separator is configured).  Entries without a parent are skipped
 * when exclude_other is set.  Returns the number of bytes written.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;

	if (symbol_conf.exclude_other && !he->parent)

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);

		/* colored output only when both the terminal and fmt support it */
		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);

	/* bytes consumed == distance the buffer pointer advanced */
	return hpp->buf - start;
/* Convenience wrapper: format 'he' with its hists' default format list. */
static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
/*
 * Print one hist entry in hierarchy (--hierarchy) mode: indent by the
 * entry's depth, print the overhead columns from the first hpp_list
 * node, pad to the deepest level, then print this entry's own sort
 * column left-aligned, followed by its callchain when it is a leaf.
 * Returns the characters printed.
 */
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;

	if (symbol_conf.exclude_other && !he->parent)

	/* indentation proportional to this entry's depth in the hierarchy */
	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);

	/* pad out to the maximum hierarchy depth so columns line up */
	ret = scnprintf(hpp->buf, hpp->size, "%*s",
			(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
			fmt->entry(fmt, hpp, he);

	/*
	 * dynamic entries are right-aligned but we want left-aligned
	 * in the hierarchy mode
	 */
	printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));

	printed += putc('\n', fp);

	/* callchains are printed only for leaf entries */
	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
/*
 * Print one hist entry to 'fp' using the caller-provided line buffer
 * 'bf' (of 'bfsz' bytes, clamped by 'size').  Delegates to the
 * hierarchy printer in --hierarchy mode; otherwise formats the row,
 * prints it, and appends the callchain.  Returns the characters printed.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
	struct perf_hpp hpp = {
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	/* size == 0 means "use the whole buffer"; never exceed bfsz */
	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
/*
 * Print hierarchy indentation built from 'line' (spaces or dots).
 * No-op when a field separator is in use or the indent level is less
 * than 2.  Returns the characters printed.
 */
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
	if (sep != NULL || indent < 2)

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
/*
 * Print the column headers for --hierarchy mode: the overhead column
 * headers, then all sort-key headers combined on one line joined by
 * " / " (and "+" between columns within a level), then a dotted
 * underline row sized to the widest header line.
 * Returns the characters printed (implied by surrounding code).
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
	bool first_node, first_col;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");

	/* combine sort headers with ' / ' */
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
			header_width += fprintf(fp, " / ");

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				header_width += fprintf(fp, "+");

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			fprintf(fp, "%s", sep ?: "..");

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);

	/* size the underline to the widest combined sort-header line */
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				width++; /* for '+' sign between column header */

			width += fmt->width(fmt, hpp, hists);

		if (width > header_width)
			header_width = width;

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");
/*
 * Print one header line ('line' index) for standard (non-hierarchy)
 * mode: each non-skipped format's header, joined by field_sep or a
 * single space.
 */
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))

		fprintf(fp, "%s", sep ?: "  ");

		fmt->header(fmt, hpp, hists, line, &span);

		fprintf(fp, "%s", hpp->buf);
/*
 * Print the standard-mode column headers: one fprintf_line() per
 * configured header line, then a '.'-filled underline row matching each
 * column's width.  Returns the number of header rows printed
 * (nr_header_lines, or nr_header_lines + 2 including the underline and
 * blank rows — the early return path is partially elided here).
 */
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		fprintf_line(hists, hpp, line, fp);

		return hpp_list->nr_header_lines;

	/* underline each column with dots matching its width */
	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))

		fprintf(fp, "%s", sep ?: "  ");

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)

	return hpp_list->nr_header_lines + 2;
/*
 * Print the column headers for 'hists', choosing the hierarchy or
 * standard layout based on symbol_conf.report_hierarchy.
 * Returns the number of header rows printed.
 */
int hists__fprintf_headers(struct hists *hists, FILE *fp)
	struct perf_hpp dummy_hpp = {

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);

	return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
/*
 * Print the whole hists tree to 'fp': optional headers, then each entry
 * above 'min_pcnt' (up to 'max_rows' rows, 'max_cols' wide), with an
 * "no entry >= x.xx%" note for hierarchy nodes whose children were all
 * filtered out.  A line buffer sized to the widest row is allocated up
 * front and reused for every entry.  Returns the characters printed.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
	const char *sep = symbol_conf.field_sep;

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)

	/* worst-case row width plus slack for color escape sequences */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)

		/* with -vv, dump the thread's maps when the symbol had no map */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
			fprintf(fp, "%.10s end\n", graph_dotted_line);

	/* release the "[...]" placeholder allocated by init_rem_hits() */
	zfree(&rem_sq_bracket);
828 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
833 for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
836 if (stats->nr_events[i] == 0)
839 name = perf_event__name(i);
840 if (!strcmp(name, "UNKNOWN"))
843 ret += fprintf(fp, "%16s events: %10d\n", name,
844 stats->nr_events[i]);