10 #include "parse-events.h"
25 } fake_mmap_info[] = {
26 { 100, 0x40000, "perf" },
27 { 100, 0x50000, "libc" },
28 { 100, 0xf0000, "[kernel]" },
29 { 200, 0x40000, "perf" },
30 { 200, 0x50000, "libc" },
31 { 200, 0xf0000, "[kernel]" },
32 { 300, 0x40000, "bash" },
33 { 300, 0x50000, "libc" },
34 { 300, 0xf0000, "[kernel]" },
43 static struct fake_sym perf_syms[] = {
45 { 800, 100, "run_command" },
46 { 900, 100, "cmd_record" },
49 static struct fake_sym bash_syms[] = {
51 { 800, 100, "xmalloc" },
52 { 900, 100, "xfree" },
55 static struct fake_sym libc_syms[] = {
56 { 700, 100, "malloc" },
58 { 900, 100, "realloc" },
61 static struct fake_sym kernel_syms[] = {
62 { 700, 100, "schedule" },
63 { 800, 100, "page_fault" },
64 { 900, 100, "sys_perf_event_open" },
69 struct fake_sym *syms;
72 { "perf", perf_syms, ARRAY_SIZE(perf_syms) },
73 { "bash", bash_syms, ARRAY_SIZE(bash_syms) },
74 { "libc", libc_syms, ARRAY_SIZE(libc_syms) },
75 { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
78 static struct machine *setup_fake_machine(struct machines *machines)
80 struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
83 if (machine == NULL) {
84 pr_debug("Not enough memory for machine setup\n");
88 for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
89 struct thread *thread;
91 thread = machine__findnew_thread(machine, fake_threads[i].pid,
96 thread__set_comm(thread, fake_threads[i].comm, 0);
99 for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
100 union perf_event fake_mmap_event = {
102 .header = { .misc = PERF_RECORD_MISC_USER, },
103 .pid = fake_mmap_info[i].pid,
104 .tid = fake_mmap_info[i].pid,
105 .start = fake_mmap_info[i].start,
111 strcpy(fake_mmap_event.mmap.filename,
112 fake_mmap_info[i].filename);
114 machine__process_mmap_event(machine, &fake_mmap_event, NULL);
117 for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
121 dso = __dsos__findnew(&machine->user_dsos,
122 fake_symbols[i].dso_name);
126 /* emulate dso__load() */
127 dso__set_loaded(dso, MAP__FUNCTION);
129 for (k = 0; k < fake_symbols[i].nr_syms; k++) {
131 struct fake_sym *fsym = &fake_symbols[i].syms[k];
133 sym = symbol__new(fsym->start, fsym->length,
134 STB_GLOBAL, fsym->name);
138 symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
145 pr_debug("Not enough memory for machine setup\n");
146 machine__delete_threads(machine);
147 machine__delete(machine);
154 struct thread *thread;
159 static struct sample fake_common_samples[] = {
160 /* perf [kernel] schedule() */
161 { .pid = 100, .ip = 0xf0000 + 700, },
162 /* perf [perf] main() */
163 { .pid = 200, .ip = 0x40000 + 700, },
164 /* perf [perf] cmd_record() */
165 { .pid = 200, .ip = 0x40000 + 900, },
166 /* bash [bash] xmalloc() */
167 { .pid = 300, .ip = 0x40000 + 800, },
168 /* bash [libc] malloc() */
169 { .pid = 300, .ip = 0x50000 + 700, },
172 static struct sample fake_samples[][5] = {
174 /* perf [perf] run_command() */
175 { .pid = 100, .ip = 0x40000 + 800, },
176 /* perf [libc] malloc() */
177 { .pid = 100, .ip = 0x50000 + 700, },
178 /* perf [kernel] page_fault() */
179 { .pid = 100, .ip = 0xf0000 + 800, },
180 /* perf [kernel] sys_perf_event_open() */
181 { .pid = 200, .ip = 0xf0000 + 900, },
182 /* bash [libc] free() */
183 { .pid = 300, .ip = 0x50000 + 800, },
186 /* perf [libc] free() */
187 { .pid = 200, .ip = 0x50000 + 800, },
188 /* bash [libc] malloc() */
189 { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
190 /* bash [bash] xfee() */
191 { .pid = 300, .ip = 0x40000 + 900, },
192 /* bash [libc] realloc() */
193 { .pid = 300, .ip = 0x50000 + 900, },
194 /* bash [kernel] page_fault() */
195 { .pid = 300, .ip = 0xf0000 + 800, },
199 static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
201 struct perf_evsel *evsel;
202 struct addr_location al;
203 struct hist_entry *he;
204 struct perf_sample sample = { .cpu = 0, };
208 * each evsel will have 10 samples - 5 common and 5 distinct.
209 * However the second evsel also has a collapsed entry for
210 * "bash [libc] malloc" so total 9 entries will be in the tree.
212 evlist__for_each(evlist, evsel) {
213 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
214 const union perf_event event = {
216 .misc = PERF_RECORD_MISC_USER,
220 sample.pid = fake_common_samples[k].pid;
221 sample.ip = fake_common_samples[k].ip;
222 if (perf_event__preprocess_sample(&event, machine, &al,
226 he = __hists__add_entry(&evsel->hists, &al, NULL,
227 NULL, NULL, 1, 1, 0);
231 fake_common_samples[k].thread = al.thread;
232 fake_common_samples[k].map = al.map;
233 fake_common_samples[k].sym = al.sym;
236 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
237 const union perf_event event = {
239 .misc = PERF_RECORD_MISC_USER,
243 sample.pid = fake_samples[i][k].pid;
244 sample.ip = fake_samples[i][k].ip;
245 if (perf_event__preprocess_sample(&event, machine, &al,
249 he = __hists__add_entry(&evsel->hists, &al, NULL,
250 NULL, NULL, 1, 1, 0);
254 fake_samples[i][k].thread = al.thread;
255 fake_samples[i][k].map = al.map;
256 fake_samples[i][k].sym = al.sym;
264 pr_debug("Not enough memory for adding a hist entry\n");
268 static int find_sample(struct sample *samples, size_t nr_samples,
269 struct thread *t, struct map *m, struct symbol *s)
271 while (nr_samples--) {
272 if (samples->thread == t && samples->map == m &&
280 static int __validate_match(struct hists *hists)
283 struct rb_root *root;
284 struct rb_node *node;
287 * Only entries from fake_common_samples should have a pair.
289 if (sort__need_collapse)
290 root = &hists->entries_collapsed;
292 root = hists->entries_in;
294 node = rb_first(root);
296 struct hist_entry *he;
298 he = rb_entry(node, struct hist_entry, rb_node_in);
300 if (hist_entry__has_pairs(he)) {
301 if (find_sample(fake_common_samples,
302 ARRAY_SIZE(fake_common_samples),
303 he->thread, he->ms.map, he->ms.sym)) {
306 pr_debug("Can't find the matched entry\n");
311 node = rb_next(node);
314 if (count != ARRAY_SIZE(fake_common_samples)) {
315 pr_debug("Invalid count for matched entries: %zd of %zd\n",
316 count, ARRAY_SIZE(fake_common_samples));
/* Validate matching on both sides; non-zero if either side fails. */
static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}
328 static int __validate_link(struct hists *hists, int idx)
331 size_t count_pair = 0;
332 size_t count_dummy = 0;
333 struct rb_root *root;
334 struct rb_node *node;
337 * Leader hists (idx = 0) will have dummy entries from other,
338 * and some entries will have no pair. However every entry
339 * in other hists should have (dummy) pair.
341 if (sort__need_collapse)
342 root = &hists->entries_collapsed;
344 root = hists->entries_in;
346 node = rb_first(root);
348 struct hist_entry *he;
350 he = rb_entry(node, struct hist_entry, rb_node_in);
352 if (hist_entry__has_pairs(he)) {
353 if (!find_sample(fake_common_samples,
354 ARRAY_SIZE(fake_common_samples),
355 he->thread, he->ms.map, he->ms.sym) &&
356 !find_sample(fake_samples[idx],
357 ARRAY_SIZE(fake_samples[idx]),
358 he->thread, he->ms.map, he->ms.sym)) {
363 pr_debug("A entry from the other hists should have pair\n");
368 node = rb_next(node);
372 * Note that we have a entry collapsed in the other (idx = 1) hists.
375 if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
376 pr_debug("Invalid count of dummy entries: %zd of %zd\n",
377 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
380 if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
381 pr_debug("Invalid count of total leader entries: %zd of %zd\n",
382 count, count_pair + ARRAY_SIZE(fake_samples[0]));
386 if (count != count_pair) {
387 pr_debug("Invalid count of total other entries: %zd of %zd\n",
391 if (count_dummy > 0) {
392 pr_debug("Other hists should not have dummy entries: %zd\n",
/* Validate linking on both sides; non-zero if either side fails. */
static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}
406 static void print_hists(struct hists *hists)
409 struct rb_root *root;
410 struct rb_node *node;
412 if (sort__need_collapse)
413 root = &hists->entries_collapsed;
415 root = hists->entries_in;
417 pr_info("----- %s --------\n", __func__);
418 node = rb_first(root);
420 struct hist_entry *he;
422 he = rb_entry(node, struct hist_entry, rb_node_in);
424 pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
425 i, thread__comm_str(he->thread), he->ms.map->dso->short_name,
426 he->ms.sym->name, he->stat.period);
429 node = rb_next(node);
433 int test__hists_link(void)
436 struct machines machines;
437 struct machine *machine = NULL;
438 struct perf_evsel *evsel, *first;
439 struct perf_evlist *evlist = perf_evlist__new();
444 err = parse_events(evlist, "cpu-clock");
447 err = parse_events(evlist, "task-clock");
451 /* default sort order (comm,dso,sym) will be used */
452 if (setup_sorting() < 0)
455 machines__init(&machines);
457 /* setup threads/dso/map/symbols also */
458 machine = setup_fake_machine(&machines);
463 machine__fprintf(machine, stderr);
465 /* process sample events */
466 err = add_hist_entries(evlist, machine);
470 evlist__for_each(evlist, evsel) {
471 hists__collapse_resort(&evsel->hists, NULL);
474 print_hists(&evsel->hists);
477 first = perf_evlist__first(evlist);
478 evsel = perf_evlist__last(evlist);
480 /* match common entries */
481 hists__match(&first->hists, &evsel->hists);
482 err = validate_match(&first->hists, &evsel->hists);
486 /* link common and/or dummy entries */
487 hists__link(&first->hists, &evsel->hists);
488 err = validate_link(&first->hists, &evsel->hists);
495 /* tear down everything */
496 perf_evlist__delete(evlist);
497 machines__exit(&machines);