]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/util/hist.c
65d42758aaddb275c97f0faa68078e4ecaea33b5
[karo-tx-linux.git] / tools / perf / util / hist.c
1 #include "util.h"
2 #include "build-id.h"
3 #include "hist.h"
4 #include "map.h"
5 #include "session.h"
6 #include "namespaces.h"
7 #include "sort.h"
8 #include "evlist.h"
9 #include "evsel.h"
10 #include "annotate.h"
11 #include "srcline.h"
12 #include "ui/progress.h"
13 #include <errno.h>
14 #include <math.h>
15
/*
 * Per-entry filter predicates; defined later in this file alongside the
 * rest of the filtering machinery, forward-declared here for early users.
 */
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);
24
25 u16 hists__col_len(struct hists *hists, enum hist_column col)
26 {
27         return hists->col_len[col];
28 }
29
30 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
31 {
32         hists->col_len[col] = len;
33 }
34
35 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
36 {
37         if (len > hists__col_len(hists, col)) {
38                 hists__set_col_len(hists, col, len);
39                 return true;
40         }
41         return false;
42 }
43
44 void hists__reset_col_len(struct hists *hists)
45 {
46         enum hist_column col;
47
48         for (col = 0; col < HISTC_NR_COLS; ++col)
49                 hists__set_col_len(hists, col, 0);
50 }
51
52 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
53 {
54         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
55
56         if (hists__col_len(hists, dso) < unresolved_col_width &&
57             !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
58             !symbol_conf.dso_list)
59                 hists__set_col_len(hists, dso, unresolved_col_width);
60 }
61
/*
 * Grow the column widths of @hists so every field of entry @h fits:
 * symbol/dso columns (regular, branch from/to, memory daddr/iaddr),
 * comm/thread, srcline/srcfile, plus the fixed-width misc columns.
 * Widths only ever grow here (via hists__new_col_len()).
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                /* unresolved symbol: shown as a raw hex address */
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                /*
                 * NOTE(review): the resolved-symbol branches below deref
                 * from.map/to.map without a NULL check — presumably a
                 * resolved sym implies a resolved map; verify that the
                 * branch-stack resolver guarantees this.
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                /* no mem info at all: size the mem columns for raw addresses */
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        /* fixed minimum widths for the remaining columns */
        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline) {
                /* never narrower than the column header itself */
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
200
201 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
202 {
203         struct rb_node *next = rb_first(&hists->entries);
204         struct hist_entry *n;
205         int row = 0;
206
207         hists__reset_col_len(hists);
208
209         while (next && row++ < max_rows) {
210                 n = rb_entry(next, struct hist_entry, rb_node);
211                 if (!n->filtered)
212                         hists__calc_col_len(hists, n);
213                 next = rb_next(&n->rb_node);
214         }
215 }
216
217 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
218                                         unsigned int cpumode, u64 period)
219 {
220         switch (cpumode) {
221         case PERF_RECORD_MISC_KERNEL:
222                 he_stat->period_sys += period;
223                 break;
224         case PERF_RECORD_MISC_USER:
225                 he_stat->period_us += period;
226                 break;
227         case PERF_RECORD_MISC_GUEST_KERNEL:
228                 he_stat->period_guest_sys += period;
229                 break;
230         case PERF_RECORD_MISC_GUEST_USER:
231                 he_stat->period_guest_us += period;
232                 break;
233         default:
234                 break;
235         }
236 }
237
238 static void he_stat__add_period(struct he_stat *he_stat, u64 period,
239                                 u64 weight)
240 {
241
242         he_stat->period         += period;
243         he_stat->weight         += weight;
244         he_stat->nr_events      += 1;
245 }
246
247 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
248 {
249         dest->period            += src->period;
250         dest->period_sys        += src->period_sys;
251         dest->period_us         += src->period_us;
252         dest->period_guest_sys  += src->period_guest_sys;
253         dest->period_guest_us   += src->period_guest_us;
254         dest->nr_events         += src->nr_events;
255         dest->weight            += src->weight;
256 }
257
258 static void he_stat__decay(struct he_stat *he_stat)
259 {
260         he_stat->period = (he_stat->period * 7) / 8;
261         he_stat->nr_events = (he_stat->nr_events * 7) / 8;
262         /* XXX need decay for weight too? */
263 }
264
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

/*
 * Apply one round of exponential decay (7/8) to @he and, in hierarchy
 * mode, recursively to its children — deleting any child whose period
 * dropped to zero.  Returns true when @he's own period reached zero,
 * telling the caller it can be removed.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        /* already fully decayed: let the caller delete it */
        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        /* only top-level entries contribute to the hists-wide totals */
        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        /* fetch the successor before the child may be freed */
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}
302
/*
 * Unlink @he from both the input and output rbtrees — picking the
 * parent's subtree roots in hierarchy mode, the hists-level roots
 * otherwise — then update the entry counters and free the entry.
 */
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root *root_in;
        struct rb_root *root_out;

        if (he->parent_he) {
                /* hierarchy mode: entry lives in its parent's trees */
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                /* collapsed tree is only used when collapsing is needed */
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase(&he->rb_node_in, root_in);
        rb_erase(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}
328
329 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
330 {
331         struct rb_node *next = rb_first(&hists->entries);
332         struct hist_entry *n;
333
334         while (next) {
335                 n = rb_entry(next, struct hist_entry, rb_node);
336                 next = rb_next(&n->rb_node);
337                 if (((zap_user && n->level == '.') ||
338                      (zap_kernel && n->level != '.') ||
339                      hists__decay_entry(hists, n))) {
340                         hists__delete_entry(hists, n);
341                 }
342         }
343 }
344
345 void hists__delete_entries(struct hists *hists)
346 {
347         struct rb_node *next = rb_first(&hists->entries);
348         struct hist_entry *n;
349
350         while (next) {
351                 n = rb_entry(next, struct hist_entry, rb_node);
352                 next = rb_next(&n->rb_node);
353
354                 hists__delete_entry(hists, n);
355         }
356 }
357
358 /*
359  * histogram, sorted on item, collects periods
360  */
361
/*
 * Deep-initialize @he from @template: copy the template wholesale, then
 * duplicate the pieces the template only borrows (stat_acc, branch_info,
 * raw_data), take references on the maps and thread, and set up the
 * callchain and hierarchy roots.
 *
 * Returns 0 on success or -ENOMEM; on failure, everything allocated or
 * referenced earlier in this function is rolled back.
 */
static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self)
{
        *he = *template;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                /* non-self samples only contribute to the accumulated stat */
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (a part of) allocated from
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries.  So we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL) {
                        map__zput(he->ms.map);
                        free(he->stat_acc);
                        return -ENOMEM;
                }

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.map);
                map__get(he->branch_info->to.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.map);
                map__get(he->mem_info->daddr.map);
        }

        if (symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                /* the sample's raw data is transient; keep our own copy */
                he->raw_data = memdup(he->raw_data, he->raw_size);

                if (he->raw_data == NULL) {
                        /* roll back every reference taken above */
                        map__put(he->ms.map);
                        if (he->branch_info) {
                                map__put(he->branch_info->from.map);
                                map__put(he->branch_info->to.map);
                                free(he->branch_info);
                        }
                        /*
                         * NOTE(review): mem_info maps are put but the
                         * mem_info struct itself is not freed here —
                         * presumably it is still owned by the caller's
                         * sample resolution; verify.
                         */
                        if (he->mem_info) {
                                map__put(he->mem_info->iaddr.map);
                                map__put(he->mem_info->daddr.map);
                        }
                        free(he->stat_acc);
                        return -ENOMEM;
                }
        }
        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT;
        he->hroot_out = RB_ROOT;

        /* in flat (non-hierarchy) mode every entry is a leaf */
        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;
}
435
436 static void *hist_entry__zalloc(size_t size)
437 {
438         return zalloc(size + sizeof(struct hist_entry));
439 }
440
/* Counterpart of hist_entry__zalloc(). */
static void hist_entry__free(void *ptr)
{
        free(ptr);
}

/* Fallback allocation ops used when a template supplies none. */
static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};
450
451 static struct hist_entry *hist_entry__new(struct hist_entry *template,
452                                           bool sample_self)
453 {
454         struct hist_entry_ops *ops = template->ops;
455         size_t callchain_size = 0;
456         struct hist_entry *he;
457         int err = 0;
458
459         if (!ops)
460                 ops = template->ops = &default_ops;
461
462         if (symbol_conf.use_callchain)
463                 callchain_size = sizeof(struct callchain_root);
464
465         he = ops->new(callchain_size);
466         if (he) {
467                 err = hist_entry__init(he, template, sample_self);
468                 if (err) {
469                         ops->free(he);
470                         he = NULL;
471                 }
472         }
473
474         return he;
475 }
476
477 static u8 symbol__parent_filter(const struct symbol *parent)
478 {
479         if (symbol_conf.exclude_other && parent == NULL)
480                 return 1 << HIST_FILTER__PARENT;
481         return 0;
482 }
483
484 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
485 {
486         if (!symbol_conf.use_callchain)
487                 return;
488
489         he->hists->callchain_period += period;
490         if (!he->filtered)
491                 he->hists->callchain_non_filtered_period += period;
492 }
493
/*
 * Look up @entry in the hists input tree; if a matching entry exists,
 * fold the sample's period/weight into it, otherwise allocate a new
 * entry from the template and insert it.  Returns the (found or new)
 * entry, or NULL on allocation failure.
 */
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in a same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        /* existing entry: accumulate this sample into it */
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        /* no match: create a fresh entry and link it into the tree */
        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        /* per-cpumode accounting applies to both found and new entries */
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}
570
/*
 * Build a stack-local hist_entry template from the resolved sample
 * location @al plus its branch/mem/callchain context, then hand it to
 * hists__findnew_entry() which either folds it into an existing entry
 * or allocates a persistent copy.
 */
static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                /* cgroup namespace ids; zero when no namespace info exists */
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}
616
617 struct hist_entry *hists__add_entry(struct hists *hists,
618                                     struct addr_location *al,
619                                     struct symbol *sym_parent,
620                                     struct branch_info *bi,
621                                     struct mem_info *mi,
622                                     struct perf_sample *sample,
623                                     bool sample_self)
624 {
625         return __hists__add_entry(hists, al, sym_parent, bi, mi,
626                                   sample, sample_self, NULL);
627 }
628
629 struct hist_entry *hists__add_entry_ops(struct hists *hists,
630                                         struct hist_entry_ops *ops,
631                                         struct addr_location *al,
632                                         struct symbol *sym_parent,
633                                         struct branch_info *bi,
634                                         struct mem_info *mi,
635                                         struct perf_sample *sample,
636                                         bool sample_self)
637 {
638         return __hists__add_entry(hists, al, sym_parent, bi, mi,
639                                   sample, sample_self, ops);
640 }
641
/* No-op "next" callback: iterators with a single step never advance. */
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}
648
/* No-op "add next" callback: nothing beyond the single entry to add. */
static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}
655
656 static int
657 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
658 {
659         struct perf_sample *sample = iter->sample;
660         struct mem_info *mi;
661
662         mi = sample__resolve_mem(sample, al);
663         if (mi == NULL)
664                 return -ENOMEM;
665
666         iter->priv = mi;
667         return 0;
668 }
669
670 static int
671 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
672 {
673         u64 cost;
674         struct mem_info *mi = iter->priv;
675         struct hists *hists = evsel__hists(iter->evsel);
676         struct perf_sample *sample = iter->sample;
677         struct hist_entry *he;
678
679         if (mi == NULL)
680                 return -EINVAL;
681
682         cost = sample->weight;
683         if (!cost)
684                 cost = 1;
685
686         /*
687          * must pass period=weight in order to get the correct
688          * sorting from hists__collapse_resort() which is solely
689          * based on periods. We want sorting be done on nr_events * weight
690          * and this is indirectly achieved by passing period=weight here
691          * and the he_stat__add_period() function.
692          */
693         sample->period = cost;
694
695         he = hists__add_entry(hists, al, iter->parent, NULL, mi,
696                               sample, true);
697         if (!he)
698                 return -ENOMEM;
699
700         iter->he = he;
701         return 0;
702 }
703
/*
 * Finish a memory-sample iteration: bump the sample counters and append
 * the callchain to the entry added earlier.  Returns -EINVAL when no
 * entry was added, otherwise the callchain-append result.
 */
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}
731
732 static int
733 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
734 {
735         struct branch_info *bi;
736         struct perf_sample *sample = iter->sample;
737
738         bi = sample__resolve_bstack(sample, al);
739         if (!bi)
740                 return -ENOMEM;
741
742         iter->curr = 0;
743         iter->total = sample->branch_stack->nr;
744
745         iter->priv = bi;
746         return 0;
747 }
748
/*
 * Branch iteration adds no "single" entry — everything happens in the
 * per-branch next/add-next steps.  Clear iter->he so the caller's
 * callback is not invoked for this phase.
 */
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling callback function */
        iter->he = NULL;

        return 0;
}
758
759 static int
760 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
761 {
762         struct branch_info *bi = iter->priv;
763         int i = iter->curr;
764
765         if (bi == NULL)
766                 return 0;
767
768         if (iter->curr >= iter->total)
769                 return 0;
770
771         al->map = bi[i].to.map;
772         al->sym = bi[i].to.sym;
773         al->addr = bi[i].to.addr;
774         return 1;
775 }
776
777 static int
778 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
779 {
780         struct branch_info *bi;
781         struct perf_evsel *evsel = iter->evsel;
782         struct hists *hists = evsel__hists(evsel);
783         struct perf_sample *sample = iter->sample;
784         struct hist_entry *he = NULL;
785         int i = iter->curr;
786         int err = 0;
787
788         bi = iter->priv;
789
790         if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
791                 goto out;
792
793         /*
794          * The report shows the percentage of total branches captured
795          * and not events sampled. Thus we use a pseudo period of 1.
796          */
797         sample->period = 1;
798         sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
799
800         he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
801                               sample, true);
802         if (he == NULL)
803                 return -ENOMEM;
804
805         hists__inc_nr_samples(hists, he->filtered);
806
807 out:
808         iter->he = he;
809         iter->curr++;
810         return err;
811 }
812
813 static int
814 iter_finish_branch_entry(struct hist_entry_iter *iter,
815                          struct addr_location *al __maybe_unused)
816 {
817         zfree(&iter->priv);
818         iter->he = NULL;
819
820         return iter->curr >= iter->total ? 0 : -1;
821 }
822
/* Normal samples need no preparation state. */
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}
829
830 static int
831 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
832 {
833         struct perf_evsel *evsel = iter->evsel;
834         struct perf_sample *sample = iter->sample;
835         struct hist_entry *he;
836
837         he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
838                               sample, true);
839         if (he == NULL)
840                 return -ENOMEM;
841
842         iter->he = he;
843         return 0;
844 }
845
846 static int
847 iter_finish_normal_entry(struct hist_entry_iter *iter,
848                          struct addr_location *al __maybe_unused)
849 {
850         struct hist_entry *he = iter->he;
851         struct perf_evsel *evsel = iter->evsel;
852         struct perf_sample *sample = iter->sample;
853
854         if (he == NULL)
855                 return 0;
856
857         iter->he = NULL;
858
859         hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
860
861         return hist_entry__append_callchain(he, sample);
862 }
863
864 static int
865 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
866                               struct addr_location *al __maybe_unused)
867 {
868         struct hist_entry **he_cache;
869
870         callchain_cursor_commit(&callchain_cursor);
871
872         /*
873          * This is for detecting cycles or recursions so that they're
874          * cumulated only one time to prevent entries more than 100%
875          * overhead.
876          */
877         he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
878         if (he_cache == NULL)
879                 return -ENOMEM;
880
881         iter->priv = he_cache;
882         iter->curr = 0;
883
884         return 0;
885 }
886
887 static int
888 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
889                                  struct addr_location *al)
890 {
891         struct perf_evsel *evsel = iter->evsel;
892         struct hists *hists = evsel__hists(evsel);
893         struct perf_sample *sample = iter->sample;
894         struct hist_entry **he_cache = iter->priv;
895         struct hist_entry *he;
896         int err = 0;
897
898         he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
899                               sample, true);
900         if (he == NULL)
901                 return -ENOMEM;
902
903         iter->he = he;
904         he_cache[iter->curr++] = he;
905
906         hist_entry__append_callchain(he, sample);
907
908         /*
909          * We need to re-initialize the cursor since callchain_append()
910          * advanced the cursor to the end.
911          */
912         callchain_cursor_commit(&callchain_cursor);
913
914         hists__inc_nr_samples(hists, he->filtered);
915
916         return err;
917 }
918
919 static int
920 iter_next_cumulative_entry(struct hist_entry_iter *iter,
921                            struct addr_location *al)
922 {
923         struct callchain_cursor_node *node;
924
925         node = callchain_cursor_current(&callchain_cursor);
926         if (node == NULL)
927                 return 0;
928
929         return fill_callchain_info(al, node, iter->hide_unresolved);
930 }
931
/*
 * Add one callchain-level entry for cumulative (children) accounting.
 *
 * A temporary key (he_tmp) describing the current callchain location
 * is compared against the entries already cached for this sample; on
 * a match the level is skipped so cycles and recursion are not
 * cumulated twice.  Otherwise a new entry is added (note the final
 * hists__add_entry() argument is false here, unlike the true used for
 * the self entry) and the remaining callchain is appended to it.
 *
 * Returns 0 on success (iter->he is left NULL when the level was a
 * duplicate, suppressing the add_entry_cb), -ENOMEM on failure.
 */
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        /*
         * Snapshot the cursor before advancing it, so the callchain
         * below this level can still be appended to the new entry.
         */
        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there's duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}
985
/*
 * Tear down cumulative iteration: release the cycle-detection cache
 * allocated by iter_prepare_cumulative_entry().
 */
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}
995
/* Iterator callbacks for memory-access samples. */
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};
1003
/* Iterator callbacks for branch-stack samples (one entry per branch pair). */
const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};
1011
/* Iterator callbacks for ordinary samples (single entry, no next step). */
const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};
1019
/* Iterator callbacks for cumulative (children) accounting: one entry per callchain level. */
const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};
1027
1028 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1029                          int max_stack_depth, void *arg)
1030 {
1031         int err, err2;
1032         struct map *alm = NULL;
1033
1034         if (al && al->map)
1035                 alm = map__get(al->map);
1036
1037         err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1038                                         iter->evsel, al, max_stack_depth);
1039         if (err)
1040                 return err;
1041
1042         iter->max_stack = max_stack_depth;
1043
1044         err = iter->ops->prepare_entry(iter, al);
1045         if (err)
1046                 goto out;
1047
1048         err = iter->ops->add_single_entry(iter, al);
1049         if (err)
1050                 goto out;
1051
1052         if (iter->he && iter->add_entry_cb) {
1053                 err = iter->add_entry_cb(iter, al, true, arg);
1054                 if (err)
1055                         goto out;
1056         }
1057
1058         while (iter->ops->next_entry(iter, al)) {
1059                 err = iter->ops->add_next_entry(iter, al);
1060                 if (err)
1061                         break;
1062
1063                 if (iter->he && iter->add_entry_cb) {
1064                         err = iter->add_entry_cb(iter, al, false, arg);
1065                         if (err)
1066                                 goto out;
1067                 }
1068         }
1069
1070 out:
1071         err2 = iter->ops->finish_entry(iter, al);
1072         if (!err)
1073                 err = err2;
1074
1075         map__put(alm);
1076
1077         return err;
1078 }
1079
1080 int64_t
1081 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1082 {
1083         struct hists *hists = left->hists;
1084         struct perf_hpp_fmt *fmt;
1085         int64_t cmp = 0;
1086
1087         hists__for_each_sort_list(hists, fmt) {
1088                 if (perf_hpp__is_dynamic_entry(fmt) &&
1089                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1090                         continue;
1091
1092                 cmp = fmt->cmp(fmt, left, right);
1093                 if (cmp)
1094                         break;
1095         }
1096
1097         return cmp;
1098 }
1099
1100 int64_t
1101 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1102 {
1103         struct hists *hists = left->hists;
1104         struct perf_hpp_fmt *fmt;
1105         int64_t cmp = 0;
1106
1107         hists__for_each_sort_list(hists, fmt) {
1108                 if (perf_hpp__is_dynamic_entry(fmt) &&
1109                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1110                         continue;
1111
1112                 cmp = fmt->collapse(fmt, left, right);
1113                 if (cmp)
1114                         break;
1115         }
1116
1117         return cmp;
1118 }
1119
/*
 * Release everything a hist_entry owns (refcounted thread/map
 * references, branch/mem info, srclines, callchain, raw data), then
 * free the entry itself through its ops->free callback.
 */
void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        /* drop refcounted references */
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        if (he->inline_node) {
                inline_node__delete(he->inline_node);
                he->inline_node = NULL;
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        /*
         * NOTE(review): an empty srcfile string is deliberately not
         * freed - presumably it points at shared static storage;
         * confirm against where srcfile is assigned before changing.
         */
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        ops->free(he);
}
1155
1156 /*
1157  * If this is not the last column, then we need to pad it according to the
1158  * pre-calculated max lenght for this column, otherwise don't bother adding
1159  * spaces because that would break viewing this with, for instance, 'less',
1160  * that would show tons of trailing spaces when a long C++ demangled method
1161  * names is sampled.
1162 */
1163 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1164                                    struct perf_hpp_fmt *fmt, int printed)
1165 {
1166         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1167                 const int width = fmt->width(fmt, hpp, he->hists);
1168                 if (printed < width) {
1169                         advance_hpp(hpp, printed);
1170                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1171                 }
1172         }
1173
1174         return printed;
1175 }
1176
1177 /*
1178  * collapse the histogram
1179  */
1180
1181 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1182 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1183                                        enum hist_filter type);
1184
1185 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1186
1187 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1188 {
1189         return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1190 }
1191
/*
 * Hierarchy mode: fix up the filter bit @type for this entry and its
 * parents.  @check reports whether a given format is one of the sort
 * keys that filter @type operates on.  If the corresponding filter
 * list is not active at all, nothing is changed.
 */
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        /* bail out early when no filter of this type is configured */
        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for current level entry, propagate
                 * filter marker to parents.  The marker bit was
                 * already set by default so it only needs to clear
                 * non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If current entry doesn't have matching formats, set
                 * filter marker for upper level entries.  it will be
                 * cleared if its lower level entries is not filtered.
                 *
                 * For lower-level entries, it inherits parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}
1261
1262 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1263 {
1264         hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1265                                             check_thread_entry);
1266
1267         hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1268                                             perf_hpp__is_dso_entry);
1269
1270         hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1271                                             perf_hpp__is_sym_entry);
1272
1273         hists__apply_filters(he->hists, he);
1274 }
1275
/*
 * Insert (a copy of) @he into one level of the hierarchy tree @root,
 * comparing only with the formats of @hpp_list.  When an equal entry
 * already exists its stats are merged and that entry is returned;
 * otherwise a new copy is created, linked under @parent_he, filtered,
 * and the fields this level's formats display are handed over from
 * @he to the copy.  Returns the level entry or NULL on allocation
 * failure.
 */
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;

        /* binary-search this level, merging on an exact match */
        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color(&new->rb_node_in, root);
        return new;
}
1344
/*
 * Insert @he into the full hierarchy: one copy per non-skipped format
 * level, each nested under the previous one.  The deepest copy is
 * marked as a leaf and gets @he's callchain merged in.  @he itself is
 * always deleted before returning.  Returns 0 on success, -1 on
 * failure.
 */
static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                /* descend: the next level nests inside this entry */
                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}
1390
/*
 * Insert @he into the collapsed tree @root.  If an equal entry exists
 * its stats and callchain are merged into it, @he is deleted, and 0
 * is returned (-1 if the callchain merge failed).  Returns 1 when @he
 * was linked as a new node.  Hierarchy mode takes a separate path.
 */
static int hists__collapse_insert_entry(struct hists *hists,
                                        struct rb_root *root,
                                        struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        /* duplicate: fold @he into the existing entry */
                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                if (callchain_merge(&callchain_cursor,
                                                    iter->callchain,
                                                    he->callchain) < 0)
                                        ret = -1;
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return 1;
}
1438
1439 struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1440 {
1441         struct rb_root *root;
1442
1443         pthread_mutex_lock(&hists->lock);
1444
1445         root = hists->entries_in;
1446         if (++hists->entries_in > &hists->entries_in_array[1])
1447                 hists->entries_in = &hists->entries_in_array[0];
1448
1449         pthread_mutex_unlock(&hists->lock);
1450
1451         return root;
1452 }
1453
/*
 * Re-evaluate every per-entry filter (dso/thread/symbol/socket) for
 * @he, updating its filtered bits.
 */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}
1461
/*
 * Collapse pass: move every entry from the current input tree into
 * hists->entries_collapsed, merging duplicates.  Entries that were
 * linked as new nodes (not merged) get the current filters applied.
 * A no-op unless the sort keys require collapsing.  Returns 0, or -1
 * when an insertion failed; stops early if the session is done.
 */
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        int ret;

        if (!hists__has(hists, need_collapse))
                return 0;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
                if (ret < 0)
                        return -1;

                if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
        return 0;
}
1502
1503 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1504 {
1505         struct hists *hists = a->hists;
1506         struct perf_hpp_fmt *fmt;
1507         int64_t cmp = 0;
1508
1509         hists__for_each_sort_list(hists, fmt) {
1510                 if (perf_hpp__should_skip(fmt, a->hists))
1511                         continue;
1512
1513                 cmp = fmt->sort(fmt, a, b);
1514                 if (cmp)
1515                         break;
1516         }
1517
1518         return cmp;
1519 }
1520
/* Reset the counters that track only non-filtered entries. */
static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}
1526
/* Reset entry counts and period totals, both filtered and not. */
void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}
1534
/* Account @h in the non-filtered entry count and period total. */
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}
1540
1541 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1542 {
1543         if (!h->filtered)
1544                 hists__inc_filter_stats(hists, h);
1545
1546         hists->nr_entries++;
1547         hists->stats.total_period += h->stat.period;
1548 }
1549
1550 static void hierarchy_recalc_total_periods(struct hists *hists)
1551 {
1552         struct rb_node *node;
1553         struct hist_entry *he;
1554
1555         node = rb_first(&hists->entries);
1556
1557         hists->stats.total_period = 0;
1558         hists->stats.total_non_filtered_period = 0;
1559
1560         /*
1561          * recalculate total period using top-level entries only
1562          * since lower level entries only see non-filtered entries
1563          * but upper level entries have sum of both entries.
1564          */
1565         while (node) {
1566                 he = rb_entry(node, struct hist_entry, rb_node);
1567                 node = rb_next(node);
1568
1569                 hists->stats.total_period += he->stat.period;
1570                 if (!he->filtered)
1571                         hists->stats.total_non_filtered_period += he->stat.period;
1572         }
1573 }
1574
1575 static void hierarchy_insert_output_entry(struct rb_root *root,
1576                                           struct hist_entry *he)
1577 {
1578         struct rb_node **p = &root->rb_node;
1579         struct rb_node *parent = NULL;
1580         struct hist_entry *iter;
1581         struct perf_hpp_fmt *fmt;
1582
1583         while (*p != NULL) {
1584                 parent = *p;
1585                 iter = rb_entry(parent, struct hist_entry, rb_node);
1586
1587                 if (hist_entry__sort(he, iter) > 0)
1588                         p = &parent->rb_left;
1589                 else
1590                         p = &parent->rb_right;
1591         }
1592
1593         rb_link_node(&he->rb_node, parent, p);
1594         rb_insert_color(&he->rb_node, root);
1595
1596         /* update column width of dynamic entry */
1597         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1598                 if (perf_hpp__is_dynamic_entry(fmt))
1599                         fmt->sort(fmt, he, NULL);
1600         }
1601 }
1602
/*
 * Recursively sort one level of the hierarchy from @root_in into
 * @root_out for output, updating entry counts and column widths as it
 * goes.  Leaf entries optionally get their callchains sorted, with
 * the min-hits threshold recomputed per entry in CHAIN_GRAPH_REL
 * mode.
 */
static void hists__hierarchy_output_resort(struct hists *hists,
                                           struct ui_progress *prog,
                                           struct rb_root *root_in,
                                           struct rb_root *root_out,
                                           u64 min_callchain_hits,
                                           bool use_callchain)
{
        struct rb_node *node;
        struct hist_entry *he;

        *root_out = RB_ROOT;
        node = rb_first(root_in);

        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node_in);
                node = rb_next(node);

                hierarchy_insert_output_entry(root_out, he);

                if (prog)
                        ui_progress__update(prog, 1);

                hists->nr_entries++;
                if (!he->filtered) {
                        hists->nr_non_filtered_entries++;
                        hists__calc_col_len(hists, he);
                }

                /* non-leaf entries recurse into their nested level */
                if (!he->leaf) {
                        hists__hierarchy_output_resort(hists, prog,
                                                       &he->hroot_in,
                                                       &he->hroot_out,
                                                       min_callchain_hits,
                                                       use_callchain);
                        continue;
                }

                if (!use_callchain)
                        continue;

                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }

                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }
}
1656
/*
 * Insert @he into the sorted output tree @entries, optionally sorting
 * its callchain first (with the min-hits threshold recomputed per
 * entry in CHAIN_GRAPH_REL mode).  Defined dynamic formats are then
 * invoked once more to update their column widths.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;

        if (use_callchain) {
                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);
        }

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);

        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
                        fmt->sort(fmt, he, NULL);  /* update column width */
        }
}
1699
/*
 * Rebuild the sorted output tree (hists->entries) from the collapsed
 * (or raw input) tree, recomputing stats and column widths.  @cb, if
 * set, can veto individual entries (nonzero return skips them).
 * Hierarchy mode is delegated to hists__hierarchy_output_resort().
 */
static void output_resort(struct hists *hists, struct ui_progress *prog,
                          bool use_callchain, hists__resort_cb_t cb)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 callchain_total;
        u64 min_callchain_hits;

        callchain_total = hists->callchain_period;
        if (symbol_conf.filter_relative)
                callchain_total = hists->callchain_non_filtered_period;

        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        if (symbol_conf.report_hierarchy) {
                hists__hierarchy_output_resort(hists, prog,
                                               &hists->entries_collapsed,
                                               &hists->entries,
                                               min_callchain_hits,
                                               use_callchain);
                hierarchy_recalc_total_periods(hists);
                return;
        }

        /* the collapsed tree is only populated when collapsing is needed */
        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                if (cb && cb(n))
                        continue;

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}
1753
1754 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1755 {
1756         bool use_callchain;
1757
1758         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1759                 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1760         else
1761                 use_callchain = symbol_conf.use_callchain;
1762
1763         output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
1764 }
1765
1766 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1767 {
1768         output_resort(hists, prog, symbol_conf.use_callchain, NULL);
1769 }
1770
/*
 * Resort @hists, letting @cb skip entries; callchain usage follows the
 * global symbol_conf.use_callchain setting.
 */
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
                             hists__resort_cb_t cb)
{
        output_resort(hists, prog, symbol_conf.use_callchain, cb);
}
1776
1777 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1778 {
1779         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1780                 return false;
1781
1782         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1783                 return true;
1784
1785         return false;
1786 }
1787
1788 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1789 {
1790         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1791
1792         while (can_goto_child(he, HMD_NORMAL)) {
1793                 node = rb_last(&he->hroot_out);
1794                 he = rb_entry(node, struct hist_entry, rb_node);
1795         }
1796         return node;
1797 }
1798
1799 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1800 {
1801         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1802
1803         if (can_goto_child(he, hmd))
1804                 node = rb_first(&he->hroot_out);
1805         else
1806                 node = rb_next(node);
1807
1808         while (node == NULL) {
1809                 he = he->parent_he;
1810                 if (he == NULL)
1811                         break;
1812
1813                 node = rb_next(&he->rb_node);
1814         }
1815         return node;
1816 }
1817
1818 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1819 {
1820         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1821
1822         node = rb_prev(node);
1823         if (node)
1824                 return rb_hierarchy_last(node);
1825
1826         he = he->parent_he;
1827         if (he == NULL)
1828                 return NULL;
1829
1830         return &he->rb_node;
1831 }
1832
1833 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1834 {
1835         struct rb_node *node;
1836         struct hist_entry *child;
1837         float percent;
1838
1839         if (he->leaf)
1840                 return false;
1841
1842         node = rb_first(&he->hroot_out);
1843         child = rb_entry(node, struct hist_entry, rb_node);
1844
1845         while (node && child->filtered) {
1846                 node = rb_next(node);
1847                 child = rb_entry(node, struct hist_entry, rb_node);
1848         }
1849
1850         if (node)
1851                 percent = hist_entry__get_percent_limit(child);
1852         else
1853                 percent = 0;
1854
1855         return node && percent >= limit;
1856 }
1857
/*
 * Clear @filter's bit on @h and, once the entry is completely
 * unfiltered, fold it and account it in the non-filtered stats.
 * In hierarchy mode @h's period is also propagated up to all parents,
 * whose filter bit is cleared as well.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);

        if (symbol_conf.report_hierarchy) {
                struct hist_entry *parent = h->parent_he;

                while (parent) {
                        he_stat__add_stat(&parent->stat, &h->stat);

                        parent->filtered &= ~(1 << filter);

                        /* still filtered by some other filter type */
                        if (parent->filtered)
                                goto next;

                        /* force fold unfiltered entry for simplicity */
                        parent->unfolded = false;
                        parent->has_no_entry = false;
                        parent->row_offset = 0;
                        parent->nr_rows = 0;
next:
                        parent = parent->parent_he;
                }
        }

        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->has_no_entry = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}
1898
1899
1900 static bool hists__filter_entry_by_dso(struct hists *hists,
1901                                        struct hist_entry *he)
1902 {
1903         if (hists->dso_filter != NULL &&
1904             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1905                 he->filtered |= (1 << HIST_FILTER__DSO);
1906                 return true;
1907         }
1908
1909         return false;
1910 }
1911
1912 static bool hists__filter_entry_by_thread(struct hists *hists,
1913                                           struct hist_entry *he)
1914 {
1915         if (hists->thread_filter != NULL &&
1916             he->thread != hists->thread_filter) {
1917                 he->filtered |= (1 << HIST_FILTER__THREAD);
1918                 return true;
1919         }
1920
1921         return false;
1922 }
1923
1924 static bool hists__filter_entry_by_symbol(struct hists *hists,
1925                                           struct hist_entry *he)
1926 {
1927         if (hists->symbol_filter_str != NULL &&
1928             (!he->ms.sym || strstr(he->ms.sym->name,
1929                                    hists->symbol_filter_str) == NULL)) {
1930                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1931                 return true;
1932         }
1933
1934         return false;
1935 }
1936
1937 static bool hists__filter_entry_by_socket(struct hists *hists,
1938                                           struct hist_entry *he)
1939 {
1940         if ((hists->socket_filter > -1) &&
1941             (he->socket != hists->socket_filter)) {
1942                 he->filtered |= (1 << HIST_FILTER__SOCKET);
1943                 return true;
1944         }
1945
1946         return false;
1947 }
1948
1949 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1950
1951 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1952 {
1953         struct rb_node *nd;
1954
1955         hists->stats.nr_non_filtered_samples = 0;
1956
1957         hists__reset_filter_stats(hists);
1958         hists__reset_col_len(hists);
1959
1960         for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1961                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1962
1963                 if (filter(hists, h))
1964                         continue;
1965
1966                 hists__remove_entry_filter(hists, h, type);
1967         }
1968 }
1969
/*
 * Re-insert @he into @root in output sort order and, for unfiltered
 * non-leaf hierarchy entries, recursively resort the children into a
 * fresh tree (their sort keys may have changed after filtering).
 */
static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct rb_root new_root = RB_ROOT;
        struct rb_node *nd;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);

        /* leaves have no children; filtered entries keep theirs as-is */
        if (he->leaf || he->filtered)
                return;

        nd = rb_first(&he->hroot_out);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                /* advance before rb_erase invalidates the node */
                nd = rb_next(nd);
                rb_erase(&h->rb_node, &he->hroot_out);

                resort_filtered_entry(&new_root, h);
        }

        he->hroot_out = new_root;
}
2006
/*
 * Apply filter @type with argument @arg over a hierarchy of entries,
 * then resort the whole output tree since filtering a lower level can
 * change the periods of upper levels.
 */
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
        struct rb_node *nd;
        struct rb_root new_root = RB_ROOT;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                int ret;

                ret = hist_entry__filter(h, type, arg);

                /*
                 * case 1. non-matching type
                 * zero out the period, set filter marker and move to child
                 */
                if (ret < 0) {
                        memset(&h->stat, 0, sizeof(h->stat));
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
                }
                /*
                 * case 2. matched type (filter out)
                 * set filter marker and move to next
                 */
                else if (ret == 1) {
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
                /*
                 * case 3. ok (not filtered)
                 * add period to hists and parents, erase the filter marker
                 * and move to next sibling
                 */
                else {
                        hists__remove_entry_filter(hists, h, type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
        }

        hierarchy_recalc_total_periods(hists);

        /*
         * resort output after applying a new filter since filter in a lower
         * hierarchy can change periods in a upper hierarchy.
         */
        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                /* advance before rb_erase invalidates the node */
                nd = rb_next(nd);
                rb_erase(&h->rb_node, &hists->entries);

                resort_filtered_entry(&new_root, h);
        }

        hists->entries = new_root;
}
2073
2074 void hists__filter_by_thread(struct hists *hists)
2075 {
2076         if (symbol_conf.report_hierarchy)
2077                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2078                                         hists->thread_filter);
2079         else
2080                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2081                                       hists__filter_entry_by_thread);
2082 }
2083
2084 void hists__filter_by_dso(struct hists *hists)
2085 {
2086         if (symbol_conf.report_hierarchy)
2087                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2088                                         hists->dso_filter);
2089         else
2090                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2091                                       hists__filter_entry_by_dso);
2092 }
2093
2094 void hists__filter_by_symbol(struct hists *hists)
2095 {
2096         if (symbol_conf.report_hierarchy)
2097                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2098                                         hists->symbol_filter_str);
2099         else
2100                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2101                                       hists__filter_entry_by_symbol);
2102 }
2103
2104 void hists__filter_by_socket(struct hists *hists)
2105 {
2106         if (symbol_conf.report_hierarchy)
2107                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2108                                         &hists->socket_filter);
2109         else
2110                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2111                                       hists__filter_entry_by_socket);
2112 }
2113
2114 void events_stats__inc(struct events_stats *stats, u32 type)
2115 {
2116         ++stats->nr_events[0];
2117         ++stats->nr_events[type];
2118 }
2119
/* Count one event of @type (plus the total) on @hists' stats. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}
2124
2125 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2126 {
2127         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2128         if (!filtered)
2129                 hists->stats.nr_non_filtered_samples++;
2130 }
2131
2132 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2133                                                  struct hist_entry *pair)
2134 {
2135         struct rb_root *root;
2136         struct rb_node **p;
2137         struct rb_node *parent = NULL;
2138         struct hist_entry *he;
2139         int64_t cmp;
2140
2141         if (hists__has(hists, need_collapse))
2142                 root = &hists->entries_collapsed;
2143         else
2144                 root = hists->entries_in;
2145
2146         p = &root->rb_node;
2147
2148         while (*p != NULL) {
2149                 parent = *p;
2150                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2151
2152                 cmp = hist_entry__collapse(he, pair);
2153
2154                 if (!cmp)
2155                         goto out;
2156
2157                 if (cmp < 0)
2158                         p = &(*p)->rb_left;
2159                 else
2160                         p = &(*p)->rb_right;
2161         }
2162
2163         he = hist_entry__new(pair, true);
2164         if (he) {
2165                 memset(&he->stat, 0, sizeof(he->stat));
2166                 he->hists = hists;
2167                 if (symbol_conf.cumulate_callchain)
2168                         memset(he->stat_acc, 0, sizeof(he->stat));
2169                 rb_link_node(&he->rb_node_in, parent, p);
2170                 rb_insert_color(&he->rb_node_in, root);
2171                 hists__inc_stats(hists, he);
2172                 he->dummy = true;
2173         }
2174 out:
2175         return he;
2176 }
2177
2178 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2179                                                     struct rb_root *root,
2180                                                     struct hist_entry *pair)
2181 {
2182         struct rb_node **p;
2183         struct rb_node *parent = NULL;
2184         struct hist_entry *he;
2185         struct perf_hpp_fmt *fmt;
2186
2187         p = &root->rb_node;
2188         while (*p != NULL) {
2189                 int64_t cmp = 0;
2190
2191                 parent = *p;
2192                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2193
2194                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2195                         cmp = fmt->collapse(fmt, he, pair);
2196                         if (cmp)
2197                                 break;
2198                 }
2199                 if (!cmp)
2200                         goto out;
2201
2202                 if (cmp < 0)
2203                         p = &parent->rb_left;
2204                 else
2205                         p = &parent->rb_right;
2206         }
2207
2208         he = hist_entry__new(pair, true);
2209         if (he) {
2210                 rb_link_node(&he->rb_node_in, parent, p);
2211                 rb_insert_color(&he->rb_node_in, root);
2212
2213                 he->dummy = true;
2214                 he->hists = hists;
2215                 memset(&he->stat, 0, sizeof(he->stat));
2216                 hists__inc_stats(hists, he);
2217         }
2218 out:
2219         return he;
2220 }
2221
2222 static struct hist_entry *hists__find_entry(struct hists *hists,
2223                                             struct hist_entry *he)
2224 {
2225         struct rb_node *n;
2226
2227         if (hists__has(hists, need_collapse))
2228                 n = hists->entries_collapsed.rb_node;
2229         else
2230                 n = hists->entries_in->rb_node;
2231
2232         while (n) {
2233                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2234                 int64_t cmp = hist_entry__collapse(iter, he);
2235
2236                 if (cmp < 0)
2237                         n = n->rb_left;
2238                 else if (cmp > 0)
2239                         n = n->rb_right;
2240                 else
2241                         return iter;
2242         }
2243
2244         return NULL;
2245 }
2246
2247 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
2248                                                       struct hist_entry *he)
2249 {
2250         struct rb_node *n = root->rb_node;
2251
2252         while (n) {
2253                 struct hist_entry *iter;
2254                 struct perf_hpp_fmt *fmt;
2255                 int64_t cmp = 0;
2256
2257                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2258                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2259                         cmp = fmt->collapse(fmt, iter, he);
2260                         if (cmp)
2261                                 break;
2262                 }
2263
2264                 if (cmp < 0)
2265                         n = n->rb_left;
2266                 else if (cmp > 0)
2267                         n = n->rb_right;
2268                 else
2269                         return iter;
2270         }
2271
2272         return NULL;
2273 }
2274
2275 static void hists__match_hierarchy(struct rb_root *leader_root,
2276                                    struct rb_root *other_root)
2277 {
2278         struct rb_node *nd;
2279         struct hist_entry *pos, *pair;
2280
2281         for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
2282                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2283                 pair = hists__find_hierarchy_entry(other_root, pos);
2284
2285                 if (pair) {
2286                         hist_entry__add_pair(pair, pos);
2287                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2288                 }
2289         }
2290 }
2291
2292 /*
2293  * Look for pairs to link to the leader buckets (hist_entries):
2294  */
2295 void hists__match(struct hists *leader, struct hists *other)
2296 {
2297         struct rb_root *root;
2298         struct rb_node *nd;
2299         struct hist_entry *pos, *pair;
2300
2301         if (symbol_conf.report_hierarchy) {
2302                 /* hierarchy report always collapses entries */
2303                 return hists__match_hierarchy(&leader->entries_collapsed,
2304                                               &other->entries_collapsed);
2305         }
2306
2307         if (hists__has(leader, need_collapse))
2308                 root = &leader->entries_collapsed;
2309         else
2310                 root = leader->entries_in;
2311
2312         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2313                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2314                 pair = hists__find_entry(other, pos);
2315
2316                 if (pair)
2317                         hist_entry__add_pair(pair, pos);
2318         }
2319 }
2320
/*
 * Recursive helper for hists__link() in hierarchy mode: walk one level
 * of @other_root and make sure every entry has a counterpart in
 * @leader_root - either an existing pair belonging to @leader_hists or
 * a freshly inserted dummy entry - then recurse into the children.
 * Returns 0 on success, -1 on allocation failure or a missing pair.
 */
static int hists__link_hierarchy(struct hists *leader_hists,
                                 struct hist_entry *parent,
                                 struct rb_root *leader_root,
                                 struct rb_root *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *leader;

        for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(pos)) {
                        bool found = false;

                        /* locate the pair that belongs to @leader_hists */
                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
                                if (leader->hists == leader_hists) {
                                        found = true;
                                        break;
                                }
                        }
                        if (!found)
                                return -1;
                } else {
                        leader = add_dummy_hierarchy_entry(leader_hists,
                                                           leader_root, pos);
                        if (leader == NULL)
                                return -1;

                        /* do not point parent in the pos */
                        leader->parent_he = parent;

                        hist_entry__add_pair(pos, leader);
                }

                if (!pos->leaf) {
                        if (hists__link_hierarchy(leader_hists, leader,
                                                  &leader->hroot_in,
                                                  &pos->hroot_in) < 0)
                                return -1;
                }
        }
        return 0;
}
2364
2365 /*
2366  * Look for entries in the other hists that are not present in the leader, if
2367  * we find them, just add a dummy entry on the leader hists, with period=0,
2368  * nr_events=0, to serve as the list header.
2369  */
2370 int hists__link(struct hists *leader, struct hists *other)
2371 {
2372         struct rb_root *root;
2373         struct rb_node *nd;
2374         struct hist_entry *pos, *pair;
2375
2376         if (symbol_conf.report_hierarchy) {
2377                 /* hierarchy report always collapses entries */
2378                 return hists__link_hierarchy(leader, NULL,
2379                                              &leader->entries_collapsed,
2380                                              &other->entries_collapsed);
2381         }
2382
2383         if (hists__has(other, need_collapse))
2384                 root = &other->entries_collapsed;
2385         else
2386                 root = other->entries_in;
2387
2388         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2389                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2390
2391                 if (!hist_entry__has_pairs(pos)) {
2392                         pair = hists__add_dummy_entry(leader, pos);
2393                         if (pair == NULL)
2394                                 return -1;
2395                         hist_entry__add_pair(pos, pair);
2396                 }
2397         }
2398
2399         return 0;
2400 }
2401
2402 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2403                           struct perf_sample *sample, bool nonany_branch_mode)
2404 {
2405         struct branch_info *bi;
2406
2407         /* If we have branch cycles always annotate them. */
2408         if (bs && bs->nr && bs->entries[0].flags.cycles) {
2409                 int i;
2410
2411                 bi = sample__resolve_bstack(sample, al);
2412                 if (bi) {
2413                         struct addr_map_symbol *prev = NULL;
2414
2415                         /*
2416                          * Ignore errors, still want to process the
2417                          * other entries.
2418                          *
2419                          * For non standard branch modes always
2420                          * force no IPC (prev == NULL)
2421                          *
2422                          * Note that perf stores branches reversed from
2423                          * program order!
2424                          */
2425                         for (i = bs->nr - 1; i >= 0; i--) {
2426                                 addr_map_symbol__account_cycles(&bi[i].from,
2427                                         nonany_branch_mode ? NULL : prev,
2428                                         bi[i].flags.cycles);
2429                                 prev = &bi[i].to;
2430                         }
2431                         free(bi);
2432                 }
2433         }
2434 }
2435
2436 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2437 {
2438         struct perf_evsel *pos;
2439         size_t ret = 0;
2440
2441         evlist__for_each_entry(evlist, pos) {
2442                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2443                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2444         }
2445
2446         return ret;
2447 }
2448
2449
2450 u64 hists__total_period(struct hists *hists)
2451 {
2452         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2453                 hists->stats.total_period;
2454 }
2455
2456 int parse_filter_percentage(const struct option *opt __maybe_unused,
2457                             const char *arg, int unset __maybe_unused)
2458 {
2459         if (!strcmp(arg, "relative"))
2460                 symbol_conf.filter_relative = true;
2461         else if (!strcmp(arg, "absolute"))
2462                 symbol_conf.filter_relative = false;
2463         else {
2464                 pr_debug("Invalid percentage: %s\n", arg);
2465                 return -1;
2466         }
2467
2468         return 0;
2469 }
2470
/*
 * perfconfig handler for the "hist." section; only "hist.percentage"
 * is recognized, all other keys are silently accepted.
 */
int perf_hist_config(const char *var, const char *value)
{
        if (strcmp(var, "hist.percentage") == 0)
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}
2478
/*
 * Zero-initialize @hists and set up its trees, lock and hpp list.
 * The memset must come first; everything after overrides selected
 * fields.  Always returns 0.
 */
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        /* -1 means no socket filtering (see hists__filter_entry_by_socket) */
        hists->socket_filter = -1;
        hists->hpp_list = hpp_list;
        INIT_LIST_HEAD(&hists->hpp_formats);
        return 0;
}
2492
2493 static void hists__delete_remaining_entries(struct rb_root *root)
2494 {
2495         struct rb_node *node;
2496         struct hist_entry *he;
2497
2498         while (!RB_EMPTY_ROOT(root)) {
2499                 node = rb_first(root);
2500                 rb_erase(node, root);
2501
2502                 he = rb_entry(node, struct hist_entry, rb_node_in);
2503                 hist_entry__delete(he);
2504         }
2505 }
2506
/*
 * Free the output entries plus anything left in the two input trees
 * and the collapsed tree.
 */
static void hists__delete_all_entries(struct hists *hists)
{
        hists__delete_entries(hists);
        hists__delete_remaining_entries(&hists->entries_in_array[0]);
        hists__delete_remaining_entries(&hists->entries_in_array[1]);
        hists__delete_remaining_entries(&hists->entries_collapsed);
}
2514
/*
 * perf_evsel destructor hook: free all hist entries and tear down the
 * dynamically added hpp format lists (formats first, then their nodes).
 */
static void hists_evsel__exit(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);
        struct perf_hpp_fmt *fmt, *pos;
        struct perf_hpp_list_node *node, *tmp;

        hists__delete_all_entries(hists);

        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
                        list_del(&fmt->list);
                        free(fmt);
                }
                list_del(&node->list);
                free(node);
        }
}
2532
/*
 * perf_evsel constructor hook: initialize the embedded hists with the
 * global hpp list.  Always returns 0.
 */
static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        __hists__init(hists, &perf_hpp_list);
        return 0;
}
2540
/*
 * NOTE: the hist_entries stored in the rbtrees are freed by
 * hists_evsel__exit() above, registered as the perf_evsel destructor
 * in hists__init().
 */
2545
/*
 * Register the hists-aware evsel constructor/destructor with the
 * perf_evsel object machinery.  Returns 0 on success, non-zero (with a
 * message on stderr) on failure.
 */
int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init,
                                            hists_evsel__exit);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}
2556
/* Initialize an hpp list's field and sort sub-lists to empty. */
void perf_hpp_list__init(struct perf_hpp_list *list)
{
        INIT_LIST_HEAD(&list->fields);
        INIT_LIST_HEAD(&list->sorts);
}