#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

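/*
 * hist_entry->filtered is a bitmask with one bit per filter below; an entry
 * is displayed only while no bit is set.  The hists__filter_entry_by_*()
 * helpers set bits, hists__remove_entry_filter() clears them.
 */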
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
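	/*
	 * Worked example (illustrative numbers, not taken from the code):
	 * on a 64-bit build with -v, a resolved symbol renders roughly as
	 * "[k] 0xffffffff81000000 B some_sym", so its column needs
	 * namelen + 4 + 16 (BITS_PER_LONG / 4 hex digits) + 2 + 3 columns.
	 */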
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

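/*
 * Decay math (illustrative numbers): each pass keeps 7/8 of the period, so
 * a live display such as perf top sees an entry's weight drop geometrically,
 * e.g. 1000 -> 875 -> 765 -> 669 -> ..., i.e. roughly period * 0.875^n after
 * n refreshes, until hists__decay_entry() below reports it reached zero and
 * the entry becomes a candidate for pruning.
 */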
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of) an allocation made by
			 * machine__resolve_bstack() that will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

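/*
 * Note on the allocation above: when callchains are in use, the
 * struct callchain_root lives in the same zalloc()'d block, right after the
 * hist_entry itself (he->callchain is declared as a zero-length trailing
 * array in sort.h), so a single free() releases both.
 */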
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period,
				      u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/*
			 * If the map of an existing hist_entry has become
			 * out-of-date due to an exec() or similar, update it.
			 * Otherwise we will mis-adjust symbol addresses when
			 * computing the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight	= weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight	= weight,
		},
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight	= weight,
		},
		.parent	= sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

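/*
 * All three __hists__add_*_entry() variants above funnel into
 * add_hist_entry(): build a template hist_entry on the stack from the
 * resolved addr_location, then either accumulate into a matching entry or
 * insert a heap copy.  A rough caller sketch (simplified; the real driver
 * lives in e.g. builtin-report.c and differs in detail):
 *
 *	struct addr_location al;
 *	// ... resolve the sample's ip/map/symbol into al ...
 *	he = __hists__add_entry(&evsel->hists, &al, parent,
 *				sample->period, sample->weight);
 *	if (he == NULL)
 *		return -ENOMEM;
 *	evsel->hists.stats.total_period += sample->period;
 *	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 */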
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

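/*
 * The "?:" above is the GNU elvis operator: use se_collapse when a sort key
 * provides one, otherwise fall back to se_cmp.  A collapse-specific compare
 * lets keys that sort cheaply (say, by pointer) still merge by value when
 * the histogram is collapsed; which keys actually provide se_collapse is
 * defined in sort.c.
 */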
void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

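/*
 * hists->entries_in_array[] holds two input trees.  The rotate above hands
 * the caller the tree filled so far and flips hists->entries_in to the other
 * one under hists->lock, so a producer thread (e.g. perf top's event
 * processing) can keep inserting new entries while the caller collapses the
 * grabbed tree.
 */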
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

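/*
 * For event groups (symbol_conf.event_group), the entries compared above
 * typically belong to the group leader; each member's period is reached via
 * the pair list and parked in a scratch array slot chosen by
 * perf_evsel__group_idx().  Index 0 is the leader, whose periods were
 * already compared first, hence the loop starting at 1: ties on the leader
 * break on the first member whose periods differ.
 */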
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

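/*
 * Resort pipeline recap: samples land in hists->entries_in (keyed by the
 * sort fields), hists__collapse_resort() optionally merges them into
 * entries_collapsed, and hists__output_resort() rebuilds hists->entries
 * ordered by period for display, recomputing nr_entries, total_period and
 * the column widths from scratch.
 */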
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

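/*
 * The three hists__filter_by_*() walkers above share a pattern: zero the
 * totals, then for each entry either set the filter bit (the entry stays
 * hidden) or clear it via hists__remove_entry_filter(), which re-adds the
 * entry's period and events to the stats only once no other filter bit
 * remains set.
 */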
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

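/*
 * nr_events[0] doubles as the "all events" counter: every event bumps
 * slot 0 plus the slot for its PERF_RECORD_* type.
 */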
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

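/*
 * Dummy entries carry the sort keys of their pair but zeroed stats; they
 * exist so that a consumer such as perf diff has a row in the leader's
 * hists even for entries that only showed up in the other session.
 */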
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
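
/*
 * Sketch of how a consumer pairs two sessions (loosely modelled on
 * perf diff; the details there differ):
 *
 *	hists__match(&evsel_base->hists, &evsel->hists); // link shared entries
 *	hists__link(&evsel_base->hists, &evsel->hists);  // add dummies for the rest
 */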