2 #include "util/debug.h"
3 #include "util/symbol.h"
5 #include "util/evsel.h"
6 #include "util/evlist.h"
7 #include "util/machine.h"
8 #include "util/thread.h"
9 #include "util/parse-events.h"
10 #include "tests/tests.h"
11 #include "tests/hists_common.h"
12 #include <linux/kernel.h>
/*
 * NOTE(review): this file is a numbered listing with gaps -- each line
 * carries its original line number and several original lines are
 * missing.  In particular the enclosing "struct sample" definition
 * around the .thread field below, and the closing "};" of the
 * fake_samples array, are not visible here.  Comments are hedged
 * accordingly; the missing lines must be restored before this compiles.
 */
17 struct thread *thread;
22 /* For the numbers, see hists_common.c */
/*
 * Ten synthetic samples: five from FAKE_PID_PERF1, two from
 * FAKE_PID_PERF2, three from FAKE_PID_BASH.  add_hist_entries() walks
 * this array in order; fake_callchains[] below is index-parallel to it.
 */
23 static struct sample fake_samples[] = {
24 /* perf [kernel] schedule() */
25 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
26 /* perf [perf] main() */
27 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
28 /* perf [perf] cmd_record() */
29 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
30 /* perf [libc] malloc() */
31 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
32 /* perf [libc] free() */
33 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
34 /* perf [perf] main() */
35 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
36 /* perf [kernel] page_fault() */
37 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
38 /* bash [bash] main() */
39 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
40 /* bash [bash] xmalloc() */
41 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
42 /* bash [kernel] page_fault() */
43 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
/*
 * Per-sample fake callchains, index-parallel with fake_samples[] above.
 * Row layout: first entry is the chain depth, followed by that many IPs
 * ordered leaf-first (sampled IP, then its callers up to main).
 * NOTE(review): listing is incomplete here -- the surrounding comment
 * block, several continuation lines (e.g. after original lines 58 and
 * 61, which each lack the trailing FAKE_IP_PERF_MAIN entry and closer),
 * and the array's closing "};" are missing from this extract.
 */
47 * Will be casted to struct ip_callchain which has all 64 bit entries
50 static u64 fake_callchains[][10] = {
51 /* schedule => run_command => main */
52 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
54 { 1, FAKE_IP_PERF_MAIN, },
55 /* cmd_record => run_command => main */
56 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
57 /* malloc => cmd_record => run_command => main */
58 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
60 /* free => cmd_record => run_command => main */
61 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
64 { 1, FAKE_IP_PERF_MAIN, },
65 /* page_fault => sys_perf_event_open => run_command => main */
66 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
67 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
69 { 1, FAKE_IP_BASH_MAIN, },
70 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
71 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
72 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
73 /* page_fault => malloc => main */
74 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
/*
 * Feed every fake_samples[] entry into @hists as a hist entry, each with
 * period 1000 and the matching fake_callchains[] chain.  The iterator
 * ops are chosen from symbol_conf.cumulate_callchain: cumulative
 * ("children") accounting vs. plain self accounting.
 * Returns 0 on success (based on the visible structure; the return
 * statements themselves are among the lines missing from this listing).
 * NOTE(review): listing gaps here include the opening brace, parts of
 * the iter initializer, the "else" before line 94, the error-path
 * "goto" bodies after lines 102/105, and the closing of the loop and
 * function -- restore from the original file before compiling.
 */
77 static int add_hist_entries(struct hists *hists, struct machine *machine)
79 struct addr_location al;
80 struct perf_evsel *evsel = hists_to_evsel(hists);
81 struct perf_sample sample = { .period = 1000, };
84 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
85 struct hist_entry_iter iter = {
88 .hide_unresolved = false,
91 if (symbol_conf.cumulate_callchain)
92 iter.ops = &hist_iter_cumulative;
94 iter.ops = &hist_iter_normal;
96 sample.cpumode = PERF_RECORD_MISC_USER;
97 sample.pid = fake_samples[i].pid;
/* tid mirrors pid: each fake process is single-threaded */
98 sample.tid = fake_samples[i].pid;
99 sample.ip = fake_samples[i].ip;
/* rows cast to struct ip_callchain: u64 nr followed by nr ips */
100 sample.callchain = (struct ip_callchain *)fake_callchains[i];
102 if (machine__resolve(machine, &al, &sample) < 0)
105 if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
107 addr_location__put(&al);
/* remember resolved thread/map/sym so do_test() can compare them */
111 fake_samples[i].thread = al.thread;
112 fake_samples[i].map = al.map;
113 fake_samples[i].sym = al.sym;
119 pr_debug("Not enough memory for adding a hist entry\n");
/*
 * Remove and free every hist entry from @hists so the next test case
 * starts from an empty tree.  Each entry lives in two rbtrees: the
 * output tree (hists->entries) and an input/collapsed tree, so both
 * rb_erase() calls are needed before hist_entry__delete().
 * NOTE(review): opening brace, the "else" before line 133, and the
 * loop/function closers are missing from this listing.
 */
123 static void del_hist_entries(struct hists *hists)
125 struct hist_entry *he;
126 struct rb_root *root_in;
127 struct rb_root *root_out;
128 struct rb_node *node;
/* pick the tree that actually holds rb_node_in for this hists config */
130 if (hists__has(hists, need_collapse))
131 root_in = &hists->entries_collapsed;
133 root_in = hists->entries_in;
135 root_out = &hists->entries;
137 while (!RB_EMPTY_ROOT(root_out)) {
138 node = rb_first(root_out);
140 he = rb_entry(node, struct hist_entry, rb_node);
141 rb_erase(node, root_out);
142 rb_erase(&he->rb_node_in, root_in);
143 hist_entry__delete(he);
/* Signature shared by the four test case functions (test1..test4). */
147 typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
/*
 * Shorthand accessors: he is a struct hist_entry *, cl is a struct
 * callchain_list *.  Used by do_test() when comparing against the
 * expected tables.  (CPU/PID/DEPTH appear unused in the visible code --
 * presumably kept for debugging; verify against the full file.)
 */
149 #define COMM(he) (thread__comm_str(he->thread))
150 #define DSO(he) (he->ms.map->dso->short_name)
151 #define SYM(he) (he->ms.sym->name)
152 #define CPU(he) (he->cpu)
153 #define PID(he) (he->thread->tid)
154 #define DEPTH(he) (he->callchain->max_depth)
155 #define CDSO(cl) (cl->ms.map->dso->short_name)
156 #define CSYM(cl) (cl->ms.sym->name)
/*
 * NOTE(review): "struct result" (original lines ~158-164) and the body
 * of struct callchain_result are missing from this listing; only the
 * opening line survives below.  Expected fields, judging from usage in
 * do_test(): result { children, self, comm, dso, sym } and
 * callchain_result { nr, node[]{ dso, sym } } -- confirm upstream.
 */
166 struct callchain_result {
/*
 * Resort @hists and verify the resulting entries against @expected
 * (self/children periods, comm, dso, sym) and -- when
 * symbol_conf.use_callchain is set -- each entry's callchain against
 * @expected_callchain.  Returns TEST_OK/TEST_FAIL via TEST_ASSERT_VAL.
 * NOTE(review): this listing omits the opening brace, local decls for
 * i/c/buf, the verbose-mode guard before line 193, parts of the assert
 * conditions (e.g. the bodies at lines 205/231/239 lack their full
 * expressions), the "continue" after line 215, and the closing returns.
 */
174 static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
175 struct callchain_result *expected_callchain, size_t nr_callchain)
179 struct hist_entry *he;
180 struct rb_root *root;
181 struct rb_node *node;
182 struct callchain_node *cnode;
183 struct callchain_list *clist;
186 * adding and deleting hist entries must be done outside of this
187 * function since TEST_ASSERT_VAL() returns in case of failure.
/* collapse duplicates, then sort into the output tree for traversal */
189 hists__collapse_resort(hists, NULL);
190 perf_evsel__output_resort(hists_to_evsel(hists), NULL);
193 pr_info("use callchain: %d, cumulate callchain: %d\n",
194 symbol_conf.use_callchain,
195 symbol_conf.cumulate_callchain);
196 print_hists_out(hists);
/* walk sorted output entries in expected order */
199 root = &hists->entries;
200 for (node = rb_first(root), i = 0;
201 node && (he = rb_entry(node, struct hist_entry, rb_node));
202 node = rb_next(node), i++) {
203 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
205 TEST_ASSERT_VAL("Incorrect number of hist entry",
207 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
208 !strcmp(COMM(he), expected[i].comm) &&
209 !strcmp(DSO(he), expected[i].dso) &&
210 !strcmp(SYM(he), expected[i].sym));
/* cumulative mode also accumulates children periods in stat_acc */
212 if (symbol_conf.cumulate_callchain)
213 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
215 if (!symbol_conf.use_callchain)
218 /* check callchain entries */
219 root = &he->callchain->node.rb_root;
221 TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
/* only the first child chain is checked -- see TODO below */
222 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
225 list_for_each_entry(clist, &cnode->val, list) {
226 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
228 TEST_ASSERT_VAL("Incorrect number of callchain entry",
229 c < expected_callchain[i].nr);
231 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
232 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
235 /* TODO: handle multiple child nodes properly */
236 TEST_ASSERT_VAL("Incorrect number of callchain entry",
237 c <= expected_callchain[i].nr);
239 TEST_ASSERT_VAL("Incorrect number of hist entry",
241 TEST_ASSERT_VAL("Incorrect number of callchain entry",
242 !symbol_conf.use_callchain || nr_expected == nr_callchain);
246 /* NO callchain + NO children */
/*
 * test1: use_callchain=false, cumulate_callchain=false -- plain
 * per-entry self periods only, so .children in expected[] stays 0 and
 * no callchain table is passed to do_test().
 * NOTE(review): listing omits the opening brace, err decl, the
 * setup_sorting/goto-out plumbing between the visible calls, and the
 * final "return err; }".
 */
247 static int test1(struct perf_evsel *evsel, struct machine *machine)
250 struct hists *hists = evsel__hists(evsel);
254 * Overhead Command Shared Object Symbol
255 * ======== ======= ============= ==============
256 * 20.00% perf perf [.] main
257 * 10.00% bash [kernel] [k] page_fault
258 * 10.00% bash bash [.] main
259 * 10.00% bash bash [.] xmalloc
260 * 10.00% perf [kernel] [k] page_fault
261 * 10.00% perf [kernel] [k] schedule
262 * 10.00% perf libc [.] free
263 * 10.00% perf libc [.] malloc
264 * 10.00% perf perf [.] cmd_record
266 struct result expected[] = {
267 { 0, 2000, "perf", "perf", "main" },
268 { 0, 1000, "bash", "[kernel]", "page_fault" },
269 { 0, 1000, "bash", "bash", "main" },
270 { 0, 1000, "bash", "bash", "xmalloc" },
271 { 0, 1000, "perf", "[kernel]", "page_fault" },
272 { 0, 1000, "perf", "[kernel]", "schedule" },
273 { 0, 1000, "perf", "libc", "free" },
274 { 0, 1000, "perf", "libc", "malloc" },
275 { 0, 1000, "perf", "perf", "cmd_record" },
278 symbol_conf.use_callchain = false;
279 symbol_conf.cumulate_callchain = false;
280 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
283 callchain_register_param(&callchain_param);
285 err = add_hist_entries(hists, machine);
289 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
292 del_hist_entries(hists);
293 reset_output_field();
297 /* callcain + NO children */
/*
 * test2: use_callchain=true, cumulate_callchain=false -- same nine
 * entries as test1, but each now carries a callchain checked against
 * expected_callchain[] (index-parallel with expected[]).
 * NOTE(review): listing omits the opening brace, err decl, most of the
 * big expected-output comment, the "{" / "}," framing lines of each
 * callchain_result initializer, and the trailing "return err; }".
 */
298 static int test2(struct perf_evsel *evsel, struct machine *machine)
301 struct hists *hists = evsel__hists(evsel);
305 * Overhead Command Shared Object Symbol
306 * ======== ======= ============= ==============
307 * 20.00% perf perf [.] main
311 * 10.00% bash [kernel] [k] page_fault
317 * 10.00% bash bash [.] main
321 * 10.00% bash bash [.] xmalloc
325 * xmalloc <--- NOTE: there's a cycle
330 * 10.00% perf [kernel] [k] page_fault
333 * sys_perf_event_open
337 * 10.00% perf [kernel] [k] schedule
343 * 10.00% perf libc [.] free
350 * 10.00% perf libc [.] malloc
357 * 10.00% perf perf [.] cmd_record
364 struct result expected[] = {
365 { 0, 2000, "perf", "perf", "main" },
366 { 0, 1000, "bash", "[kernel]", "page_fault" },
367 { 0, 1000, "bash", "bash", "main" },
368 { 0, 1000, "bash", "bash", "xmalloc" },
369 { 0, 1000, "perf", "[kernel]", "page_fault" },
370 { 0, 1000, "perf", "[kernel]", "schedule" },
371 { 0, 1000, "perf", "libc", "free" },
372 { 0, 1000, "perf", "libc", "malloc" },
373 { 0, 1000, "perf", "perf", "cmd_record" },
375 struct callchain_result expected_callchain[] = {
377 1, { { "perf", "main" }, },
380 3, { { "[kernel]", "page_fault" },
381 { "libc", "malloc" },
382 { "bash", "main" }, },
385 1, { { "bash", "main" }, },
388 6, { { "bash", "xmalloc" },
389 { "libc", "malloc" },
390 { "bash", "xmalloc" },
391 { "libc", "malloc" },
392 { "bash", "xmalloc" },
393 { "bash", "main" }, },
396 4, { { "[kernel]", "page_fault" },
397 { "[kernel]", "sys_perf_event_open" },
398 { "perf", "run_command" },
399 { "perf", "main" }, },
402 3, { { "[kernel]", "schedule" },
403 { "perf", "run_command" },
404 { "perf", "main" }, },
407 4, { { "libc", "free" },
408 { "perf", "cmd_record" },
409 { "perf", "run_command" },
410 { "perf", "main" }, },
413 4, { { "libc", "malloc" },
414 { "perf", "cmd_record" },
415 { "perf", "run_command" },
416 { "perf", "main" }, },
419 3, { { "perf", "cmd_record" },
420 { "perf", "run_command" },
421 { "perf", "main" }, },
425 symbol_conf.use_callchain = true;
426 symbol_conf.cumulate_callchain = false;
427 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
430 callchain_register_param(&callchain_param);
432 err = add_hist_entries(hists, machine);
436 err = do_test(hists, expected, ARRAY_SIZE(expected),
437 expected_callchain, ARRAY_SIZE(expected_callchain));
440 del_hist_entries(hists);
441 reset_output_field();
445 /* NO callchain + children */
/*
 * test3: use_callchain=false, cumulate_callchain=true -- cumulative
 * ("children") accounting without callchain checks.  expected[] rows
 * are { children, self, comm, dso, sym }; callers like run_command and
 * sys_perf_event_open appear with self==0 because they were never the
 * sampled IP, only callchain ancestors.
 * NOTE(review): listing omits the opening brace, err decl, the
 * setup/goto plumbing, and the final "return err; }".
 */
446 static int test3(struct perf_evsel *evsel, struct machine *machine)
449 struct hists *hists = evsel__hists(evsel);
453 * Children Self Command Shared Object Symbol
454 * ======== ======== ======= ============= =======================
455 * 70.00% 20.00% perf perf [.] main
456 * 50.00% 0.00% perf perf [.] run_command
457 * 30.00% 10.00% bash bash [.] main
458 * 30.00% 10.00% perf perf [.] cmd_record
459 * 20.00% 0.00% bash libc [.] malloc
460 * 10.00% 10.00% bash [kernel] [k] page_fault
461 * 10.00% 10.00% bash bash [.] xmalloc
462 * 10.00% 10.00% perf [kernel] [k] page_fault
463 * 10.00% 10.00% perf libc [.] malloc
464 * 10.00% 10.00% perf [kernel] [k] schedule
465 * 10.00% 10.00% perf libc [.] free
466 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
468 struct result expected[] = {
469 { 7000, 2000, "perf", "perf", "main" },
470 { 5000, 0, "perf", "perf", "run_command" },
471 { 3000, 1000, "bash", "bash", "main" },
472 { 3000, 1000, "perf", "perf", "cmd_record" },
473 { 2000, 0, "bash", "libc", "malloc" },
474 { 1000, 1000, "bash", "[kernel]", "page_fault" },
475 { 1000, 1000, "bash", "bash", "xmalloc" },
476 { 1000, 1000, "perf", "[kernel]", "page_fault" },
477 { 1000, 1000, "perf", "[kernel]", "schedule" },
478 { 1000, 1000, "perf", "libc", "free" },
479 { 1000, 1000, "perf", "libc", "malloc" },
480 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
483 symbol_conf.use_callchain = false;
484 symbol_conf.cumulate_callchain = true;
485 perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
488 callchain_register_param(&callchain_param);
490 err = add_hist_entries(hists, machine);
494 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
497 del_hist_entries(hists);
498 reset_output_field();
502 /* callchain + children */
/*
 * test4: use_callchain=true, cumulate_callchain=true -- the full
 * combination: cumulative periods as in test3 AND per-entry callchains
 * as in test2.  Note callchains here are stored caller-relative to each
 * entry (e.g. the "main" entry's chain is just itself), and the
 * expected order differs from test3 for the 1000-period tail entries.
 * NOTE(review): listing omits the opening brace, err decl, most of the
 * expected-output comment, the "{" / "}," framing of each
 * callchain_result initializer, one node line of the bash/malloc chain
 * (original line 621), and the final "return err; }".
 */
503 static int test4(struct perf_evsel *evsel, struct machine *machine)
506 struct hists *hists = evsel__hists(evsel);
510 * Children Self Command Shared Object Symbol
511 * ======== ======== ======= ============= =======================
512 * 70.00% 20.00% perf perf [.] main
516 * 50.00% 0.00% perf perf [.] run_command
521 * 30.00% 10.00% bash bash [.] main
525 * 30.00% 10.00% perf perf [.] cmd_record
531 * 20.00% 0.00% bash libc [.] malloc
535 * |--50.00%-- xmalloc
539 * 10.00% 10.00% bash [kernel] [k] page_fault
545 * 10.00% 10.00% bash bash [.] xmalloc
549 * xmalloc <--- NOTE: there's a cycle
554 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
556 * --- sys_perf_event_open
560 * 10.00% 10.00% perf [kernel] [k] page_fault
563 * sys_perf_event_open
567 * 10.00% 10.00% perf [kernel] [k] schedule
573 * 10.00% 10.00% perf libc [.] free
580 * 10.00% 10.00% perf libc [.] malloc
588 struct result expected[] = {
589 { 7000, 2000, "perf", "perf", "main" },
590 { 5000, 0, "perf", "perf", "run_command" },
591 { 3000, 1000, "bash", "bash", "main" },
592 { 3000, 1000, "perf", "perf", "cmd_record" },
593 { 2000, 0, "bash", "libc", "malloc" },
594 { 1000, 1000, "bash", "[kernel]", "page_fault" },
595 { 1000, 1000, "bash", "bash", "xmalloc" },
596 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
597 { 1000, 1000, "perf", "[kernel]", "page_fault" },
598 { 1000, 1000, "perf", "[kernel]", "schedule" },
599 { 1000, 1000, "perf", "libc", "free" },
600 { 1000, 1000, "perf", "libc", "malloc" },
602 struct callchain_result expected_callchain[] = {
604 1, { { "perf", "main" }, },
607 2, { { "perf", "run_command" },
608 { "perf", "main" }, },
611 1, { { "bash", "main" }, },
614 3, { { "perf", "cmd_record" },
615 { "perf", "run_command" },
616 { "perf", "main" }, },
619 4, { { "libc", "malloc" },
620 { "bash", "xmalloc" },
622 { "bash", "main" }, },
625 3, { { "[kernel]", "page_fault" },
626 { "libc", "malloc" },
627 { "bash", "main" }, },
630 6, { { "bash", "xmalloc" },
631 { "libc", "malloc" },
632 { "bash", "xmalloc" },
633 { "libc", "malloc" },
634 { "bash", "xmalloc" },
635 { "bash", "main" }, },
638 3, { { "[kernel]", "sys_perf_event_open" },
639 { "perf", "run_command" },
640 { "perf", "main" }, },
643 4, { { "[kernel]", "page_fault" },
644 { "[kernel]", "sys_perf_event_open" },
645 { "perf", "run_command" },
646 { "perf", "main" }, },
649 3, { { "[kernel]", "schedule" },
650 { "perf", "run_command" },
651 { "perf", "main" }, },
654 4, { { "libc", "free" },
655 { "perf", "cmd_record" },
656 { "perf", "run_command" },
657 { "perf", "main" }, },
660 4, { { "libc", "malloc" },
661 { "perf", "cmd_record" },
662 { "perf", "run_command" },
663 { "perf", "main" }, },
667 symbol_conf.use_callchain = true;
668 symbol_conf.cumulate_callchain = true;
669 perf_evsel__set_sample_bit(evsel, CALLCHAIN);
/* unlike test1-3, explicitly reset to defaults before registering */
673 callchain_param = callchain_param_default;
674 callchain_register_param(&callchain_param);
676 err = add_hist_entries(hists, machine);
680 err = do_test(hists, expected, ARRAY_SIZE(expected),
681 expected_callchain, ARRAY_SIZE(expected_callchain));
684 del_hist_entries(hists);
685 reset_output_field();
/*
 * Test entry point: build a cpu-clock evsel and a fake machine
 * (threads/dsos/maps/symbols from hists_common), then run test1-test4
 * against them and tear everything down.
 * NOTE(review): listing omits the opening brace, err/i decls, the
 * testcases[] initializer entries (presumably test1..test4 -- confirm),
 * error-handling gotos after lines 706/714/724, the verbose guard
 * before line 719, and the final "return err; }".
 */
689 int test__hists_cumulate(int subtest __maybe_unused)
692 struct machines machines;
693 struct machine *machine;
694 struct perf_evsel *evsel;
695 struct perf_evlist *evlist = perf_evlist__new();
697 test_fn_t testcases[] = {
704 TEST_ASSERT_VAL("No memory", evlist);
706 err = parse_events(evlist, "cpu-clock", NULL);
711 machines__init(&machines);
713 /* setup threads/dso/map/symbols also */
714 machine = setup_fake_machine(&machines);
719 machine__fprintf(machine, stderr);
721 evsel = perf_evlist__first(evlist);
723 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
724 err = testcases[i](evsel, machine);
730 /* tear down everything */
731 perf_evlist__delete(evlist);
732 machines__exit(&machines);