/* tools/perf/util/hist.c */

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

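/*
 * Default callchain parameters: graph output with percentages relative to
 * the parent (CHAIN_GRAPH_REL), 0.5% minimum, callee order, keyed by
 * function.
 */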
struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

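/*
 * Grow the column widths so that every field of this entry (symbol, comm,
 * dso, branch and mem info, etc.) fits; widths only ever increase until
 * hists__reset_col_len() is called.
 */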
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{

        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

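/*
 * Exponential decay: each pass keeps 7/8 of the period and event count so
 * that entries which stop getting samples gradually fade out (used by the
 * live, top-like modes).
 */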
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

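/*
 * Decay every entry; entries whose period reaches zero (or that match the
 * zap_user/zap_kernel request, judged from their privilege level) are
 * removed, unless the browser still has them in use.
 */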
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when the
                 * user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

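/*
 * Allocate a new hist_entry from a template.  The callchain_root, when
 * callchains are in use, lives in the flexible tail of the allocation, and
 * branch_info is deep-copied because the template's copy is owned by the
 * sample resolution code.
 */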
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (part of) an array allocated in
                         * sample__resolve_bstack() and will be freed after
                         * adding new entries, so we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        hists->nr_entries++;
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al);
}

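/*
 * A rough sketch of how a tool typically drives this API, per evsel
 * (simplified; see e.g. builtin-report.c for the real thing):
 *
 *      // for each PERF_RECORD_SAMPLE, after resolving the addr_location:
 *      he = __hists__add_entry(&evsel->hists, &al, parent, NULL, NULL,
 *                              sample->period, sample->weight,
 *                              sample->transaction);
 *      // ...once all samples were added:
 *      hists__collapse_resort(&evsel->hists, NULL);
 *      hists__output_resort(&evsel->hists);
 */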
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        zfree(&he->branch_info);
        zfree(&he->mem_info);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

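/*
 * Insert the entry into the rbtree keyed by hist_entry__collapse().  If an
 * equal entry already exists, fold this one into it: sum the stats, merge
 * the callchains and free it.  Returns true only when a new node was
 * actually inserted.
 */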
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

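/*
 * hists->entries_in points at one of the two trees in entries_in_array;
 * grab the current one for collapsing and flip new additions over to the
 * other, so that entries can keep arriving while we resort.
 */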
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

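/*
 * Order output entries by period.  When event groups are displayed, ties
 * on the leader's period are broken by comparing the paired entries of the
 * other group members, in group index order.
 */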
static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;
        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;
        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

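/*
 * Rebuild hists->entries from the collapsed (or input) tree, sorted for
 * output by period, recomputing the entry count, total period and column
 * widths along the way.  Callchains below min_percent of the total are
 * filtered via min_callchain_hits.
 */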
void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

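/*
 * Called when the given filter no longer applies to the entry: clear its
 * filter bit and, if no other filter still hides it, add its events and
 * period back into the totals and re-grow the column widths.
 */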
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}


static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

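/*
 * The UI sets the filter and then reruns the corresponding pass, along the
 * lines of (hypothetical browser code):
 *
 *      hists->dso_filter = dso;
 *      hists__filter_by_dso(hists);
 */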
void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

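/*
 * Slot 0 of nr_events[] accumulates the total across all record types, on
 * top of the per-type count.
 */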
void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
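/*
 * (Matching and linking across hists is what lets, e.g., perf diff and the
 * event-group view line up the same entry coming from different evsels.)
 */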
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}