static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
static void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
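/*
 * Worked example for the width above: an unresolved address prints as raw
 * hex digits, and BITS_PER_LONG / 4 is exactly how many it takes, e.g.
 * 64 / 4 = 16 columns on a 64-bit target. Callers below add a little more
 * for the decorations around the raw address.
 */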
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info,
		 * +2 accounts for the '0x' prefix on raw addresses.
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}
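/*
 * Worked example: each pass keeps 7/8 of the weight, so a period of 1000
 * goes 1000 -> 875 -> 765 -> 669 -> ..., i.e. it halves roughly every
 * five passes. Integer division drives small counts to 0, at which point
 * hists__decay_entry() below reports the entry as fully decayed.
 */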
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in,
					 &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}
/*
 * histogram, sorted on item, collects periods
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}
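/*
 * Usage sketch (illustrative only; evsel, al, parent and sample are the
 * caller's, resolved beforehand the way the report tool does it):
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(&evsel->hists, &al, parent, sample->period);
 *	if (he == NULL)
 *		return -ENOMEM;
 */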
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
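/*
 * Example: when sorting by comm only, two entries that differ in symbol
 * but share a comm compare equal here, so their periods, nr_events and
 * (optionally) callchains are merged into a single collapsed node and
 * the duplicate entry is freed.
 */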
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
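/*
 * entries_in flips between entries_in_array[0] and entries_in_array[1]:
 * the caller drains the tree that was being filled while new samples go
 * into the other one, so a threaded collapse never contends with sample
 * insertion for longer than this short critical section.
 */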
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}
void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}
/*
 * reverse the map, sort on period.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}
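/*
 * The two resort stages normally run back to back once all samples are
 * in. Sketch of a typical report-style consumer (illustrative only):
 *
 *	hists__collapse_resort(hists);		merges sort-key duplicates
 *	hists__output_resort(hists);		re-sorts by period for output
 *	hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
 */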
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, " ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "| ");
		else
			ret += fprintf(fp, " ");

	ret += fprintf(fp, "\n");

	return ret;
}
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", " ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;
static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except if we have
		 * remaining filtered hits: they will supersede the last child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      cumul,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
	    remaining && remaining != total_samples) {

		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      remaining, left_margin);
	}

	return ret;
}
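/*
 * depth_mask worked example: with depth == 3 and depth_mask == 0x5
 * (binary 101), columns 0 and 2 still have open subtrees and print '|',
 * while column 1 prints spaces. Clearing bit (depth - 1) for the last
 * child is what terminates the vertical line instead of letting it run
 * past the end of the subtree.
 */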
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;

	/*
	 * If we have a single callchain root, don't bother printing
	 * its percentage (100 % in fractal mode and the same percentage
	 * as the hist in graph mode). This also avoids one level of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol. No need to print it, otherwise it appears
			 * displayed twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
	}

	return __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
}
static size_t __callchain__fprintf_flat(FILE *fp,
					struct callchain_node *self,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += __callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n",
				       (void *)(long)chain->ip);
	}

	return ret;
}
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct rb_node *rb_node;
	struct callchain_node *chain;

	rb_node = rb_first(self);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;

		/* accumulate (was "ret =", which dropped earlier output) */
		ret += percent_color_fprintf(fp, " %6.2f%%\n", percent);
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
						left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : " %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%",
					(period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : " %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : " %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		if (fabs(diff) >= 0.01)
			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}
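/*
 * The two output shapes this produces: without a field separator the
 * percent is a fixed-width (optionally colored) column such as " 12.34%";
 * with --field-separator it degrades to a bare "12.34" joined by the
 * separator, which is the form scripted consumers split on.
 */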
int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int ret = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += scnprintf(s + ret, size - ret, "%s", sep ?: " ");
		ret += se->se_snprintf(he, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}
static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
		left_margin = hists__col_len(hists, se->se_width_idx);
		left_margin -= thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, " sys ");
			ret += fprintf(fp, " us ");
			if (perf_guest) {
				ret += fprintf(fp, " guest sys ");
				ret += fprintf(fp, " guest us ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs(" Samples ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, " Period ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, " Delta ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, " %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, " ....... .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, " ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
						(long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}
/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	if (symbol_conf.show_total_period)
		ret += 13;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(hists, se->se_width_idx);

	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
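/*
 * Usage sketch for the three filters above (illustrative; in practice
 * the TUI hist_browser owns the filter lifetime):
 *
 *	hists->dso_filter = dso;	any of dso_filter, thread_filter
 *	hists__filter_by_dso(hists);	and symbol_filter_str work alike
 *
 * Resetting the filter to NULL and calling the same helper again makes
 * every entry visible and restores the full totals.
 */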
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	/* slot 0 accumulates the total across all event types */
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (hists->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       hists->stats.nr_events[i]);
	}

	return ret;
}