/* tools/perf/util/event.c */
#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
        [0]                                     = "TOTAL",
        [PERF_RECORD_MMAP]                      = "MMAP",
        [PERF_RECORD_MMAP2]                     = "MMAP2",
        [PERF_RECORD_LOST]                      = "LOST",
        [PERF_RECORD_COMM]                      = "COMM",
        [PERF_RECORD_EXIT]                      = "EXIT",
        [PERF_RECORD_THROTTLE]                  = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE]                = "UNTHROTTLE",
        [PERF_RECORD_FORK]                      = "FORK",
        [PERF_RECORD_READ]                      = "READ",
        [PERF_RECORD_SAMPLE]                    = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]           = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND]            = "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
        if (id >= ARRAY_SIZE(perf_event__names))
                return "INVALID";
        if (!perf_event__names[id])
                return "UNKNOWN";
        return perf_event__names[id];
}

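/*
 * Synthesized COMM/MMAP records are not real samples, so they are emitted
 * with this placeholder sample: pid/tid/time/stream_id/cpu are marked as
 * unknown (-1) and the period is 1.
 */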
static struct perf_sample synth_sample = {
        .pid       = -1,
        .tid       = -1,
        .time      = -1,
        .stream_id = -1,
        .cpu       = -1,
        .period    = 1,
};

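/*
 * Parse /proc/<pid>/status to recover the thread's command name and thread
 * group id.  The relevant lines look like:
 *
 *   Name:   cat
 *   Tgid:   1234
 */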
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
        char filename[PATH_MAX];
        char bf[BUFSIZ];
        FILE *fp;
        size_t size = 0;
        pid_t tgid = -1;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while (!comm[0] || (tgid < 0)) {
                if (fgets(bf, sizeof(bf), fp) == NULL) {
                        pr_warning("couldn't get COMM and tgid, malformed %s\n",
                                   filename);
                        break;
                }

                if (memcmp(bf, "Name:", 5) == 0) {
                        char *name = bf + 5;
                        while (*name && isspace(*name))
                                ++name;
                        size = strlen(name) - 1;
                        if (size >= len)
                                size = len - 1;
                        memcpy(comm, name, size);
                        comm[size] = '\0';

                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        char *tgids = bf + 5;
                        while (*tgids && isspace(*tgids))
                                ++tgids;
                        tgid = atoi(tgids);
                }
        }

        fclose(fp);

        return tgid;
}

static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         int full,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        char filename[PATH_MAX];
        size_t size;
        DIR *tasks;
        struct dirent dirent, *next;
        pid_t tgid;

        memset(&event->comm, 0, sizeof(event->comm));

        tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
                                         sizeof(event->comm.comm));
        if (tgid < 0)
                goto out;

        event->comm.pid = tgid;
        event->comm.header.type = PERF_RECORD_COMM;

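        /*
         * Shrink the record to the aligned length of the comm string: drop
         * the unused tail of the fixed-size comm[] array, then leave room
         * for the sample id header the kernel would append.
         */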
        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        if (!full) {
                event->comm.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0)
                        return -1;

                goto out;
        }

        snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

        tasks = opendir(filename);
        if (tasks == NULL) {
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while (!readdir_r(tasks, &dirent, &next) && next) {
                char *end;
                pid = strtol(dirent.d_name, &end, 10);
                if (*end)
                        continue;

                /* already have the tgid; just want to update the comm */
                (void) perf_event__get_comm_tgid(pid, event->comm.comm,
                                         sizeof(event->comm.comm));

                size = strlen(event->comm.comm) + 1;
                size = PERF_ALIGN(size, sizeof(u64));
                memset(event->comm.comm + size, 0, machine->id_hdr_size);
                event->comm.header.size = (sizeof(event->comm) -
                                          (sizeof(event->comm.comm) - size) +
                                          machine->id_hdr_size);

                event->comm.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0) {
                        tgid = -1;
                        break;
                }
        }

        closedir(tasks);
out:
        return tgid;
}

static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                              union perf_event *event,
                                              pid_t pid, pid_t tgid,
                                              perf_event__handler_t process,
                                              struct machine *machine)
{
        char filename[PATH_MAX];
        FILE *fp;
        int rc = 0;

        snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;
        /*
         * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
         */
        event->header.misc = PERF_RECORD_MISC_USER;

        while (1) {
                char bf[BUFSIZ];
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
                size_t size;
                ssize_t n;

                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                /* ensure null termination since stack will be reused. */
                strcpy(execname, "");

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
                       &event->mmap.start, &event->mmap.len, prot,
                       &event->mmap.pgoff,
                       execname);

                /* anon maps don't have an execname, so only 4 fields match */
                if (n < 4)
                        continue;

                if (prot[2] != 'x')
                        continue;

                if (!strcmp(execname, ""))
                        strcpy(execname, anonstr);

                size = strlen(execname) + 1;
                memcpy(event->mmap.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap.len -= event->mmap.start;
                event->mmap.header.size = (sizeof(event->mmap) -
                                        (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.pid = tgid;
                event->mmap.tid = pid;

                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
                        break;
                }
        }

        fclose(fp);
        return rc;
}

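/*
 * Walk the machine's kernel function maps and emit one synthetic MMAP record
 * per module map; the kernel image itself is skipped here and handled by
 * perf_event__synthesize_kernel_mmap().
 */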
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct rb_node *nd;
        struct map_groups *kmaps = &machine->kmaps;
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
             nd; nd = rb_next(nd)) {
                size_t size;
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (pos->dso->kernel)
                        continue;

                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                        (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len   = pos->end - pos->start;
                event->mmap.pid   = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}

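/*
 * Synthesize the COMM event(s) for one pid and then, using the tgid that
 * comes back, the MMAP events parsed from /proc/<pid>/maps.
 */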
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      pid_t pid, int full,
                                      perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine)
{
        pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
                                                 process, machine);
        if (tgid == -1)
                return -1;
        return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                  process, machine);
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        union perf_event *comm_event, *mmap_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               threads->map[thread], 0,
                                               process, tool, machine)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != threads->map[thread]) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == threads->map[j]) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event,
                                                      mmap_event,
                                                      comm_event->comm.pid, 0,
                                                      process, tool, machine)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        DIR *proc;
        struct dirent dirent, *next;
        union perf_event *comm_event, *mmap_event;
        int err = -1;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        proc = opendir("/proc");
        if (proc == NULL)
                goto out_free_mmap;

        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
                pid_t pid = strtol(dirent.d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */
                        continue;
                /*
                 * We may race with an exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, pid, 1,
                                           process, tool, machine);
        }

        err = 0;
        closedir(proc);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct process_symbol_args {
        const char *name;
        u64        start;
};

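/*
 * kallsyms__parse() callback: remember the address of the symbol named in
 * process_symbol_args (typically "_text" or "_stext") and stop the scan.
 */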
static int find_symbol_cb(void *arg, const char *name, char type,
                          u64 start)
{
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as in PARISC64, where "_text" is
         * an 'A' symbol aliased to the same address as "_stext".
         */
        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
              type == 'A') || strcmp(name, args->name))
                return 0;

        args->start = start;
        return 1;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       const char *symbol_name)
{
        size_t size;
        const char *filename, *mmap_name;
        char path[PATH_MAX];
        char name_buff[PATH_MAX];
        struct map *map;
        int err;
        /*
         * We should get this from /sys/kernel/sections/.text, but until that
         * is available use this, and once it is available, keep this as a
         * fallback for older kernels.
         */
        struct process_symbol_args args = { .name = symbol_name, };
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for the kernel\n");
                return -1;
        }

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
                filename = "/proc/kallsyms";
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
                if (machine__is_default_guest(machine))
                        filename = (char *) symbol_conf.default_guest_kallsyms;
                else {
                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
                        filename = path;
                }
        }

        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
                free(event);
                return -ENOENT;
        }

        map = machine->vmlinux_maps[MAP__FUNCTION];
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, symbol_name) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                        (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
        event->mmap.pgoff = args.start;
        event->mmap.start = map->start;
        event->mmap.len   = map->end - event->mmap.start;
        event->mmap.pid   = machine->pid;

        err = process(tool, event, &synth_sample, machine);
        free(event);

        return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
        return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample __maybe_unused,
                             struct machine *machine)
{
        return machine__process_comm_event(machine, event);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample __maybe_unused,
                             struct machine *machine)
{
        return machine__process_lost_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
                       event->mmap.pid, event->mmap.tid, event->mmap.start,
                       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
                           " %02x:%02x %"PRIu64" %"PRIu64"]: %s\n",
                       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                       event->mmap2.min, event->mmap2.ino,
                       event->mmap2.ino_generation,
                       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample __maybe_unused,
                             struct machine *machine)
{
        return machine__process_mmap_event(machine, event);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
                              union perf_event *event,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine)
{
        return machine__process_mmap2_event(machine, event);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
        return fprintf(fp, "(%d:%d):(%d:%d)\n",
                       event->fork.pid, event->fork.tid,
                       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample __maybe_unused,
                             struct machine *machine)
{
        return machine__process_fork_event(machine, event);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample __maybe_unused,
                             struct machine *machine)
{
        return machine__process_exit_event(machine, event);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret += perf_event__fprintf_comm(event, fp);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                ret += perf_event__fprintf_task(event, fp);
                break;
        case PERF_RECORD_MMAP:
                ret += perf_event__fprintf_mmap(event, fp);
                break;
        case PERF_RECORD_MMAP2:
                ret += perf_event__fprintf_mmap2(event, fp);
                break;
        default:
                ret += fprintf(fp, "\n");
        }

        return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample __maybe_unused,
                        struct machine *machine)
{
        return machine__process_event(machine, event);
}

void thread__find_addr_map(struct thread *self,
                           struct machine *machine, u8 cpumode,
                           enum map_type type, u64 addr,
                           struct addr_location *al)
{
        struct map_groups *mg = &self->mg;
        bool load_map = false;

        al->thread = self;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = false;

        if (machine == NULL) {
                al->map = NULL;
                return;
        }

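        /*
         * Map the sample cpumode onto the addr_location level character:
         * 'k' host kernel, '.' host user, 'g' guest kernel, 'u' guest user,
         * 'H' for everything else (e.g. hypervisor samples).
         */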
        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                mg = &machine->kmaps;
                load_map = true;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                mg = &machine->kmaps;
                load_map = true;
        } else {
                /*
                 * 'u' means guest OS user space.
                 * TODO: We don't support guest user space. Might support it later.
                 */
                if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
                        al->level = 'u';
                else
                        al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                        cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                        !perf_guest)
                        al->filtered = true;
                if ((cpumode == PERF_RECORD_MISC_USER ||
                        cpumode == PERF_RECORD_MISC_KERNEL) &&
                        !perf_host)
                        al->filtered = true;

                return;
        }
try_again:
        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now let's use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if ((long long)al->addr < 0 &&
                    cpumode == PERF_RECORD_MISC_USER &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
                        goto try_again;
                }
        } else {
                /*
                 * Kernel maps might be changed when loading symbols so loading
                 * must be done prior to using kernel maps.
                 */
                if (load_map)
                        map__load(al->map, machine->symbol_filter);
                al->addr = al->map->map_ip(al->map, al->addr);
        }
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
                                u8 cpumode, enum map_type type, u64 addr,
                                struct addr_location *al)
{
        thread__find_addr_map(thread, machine, cpumode, type, addr, al);
        if (al->map != NULL)
                al->sym = map__find_symbol(al->map, al->addr,
                                           machine->symbol_filter);
        else
                al->sym = NULL;
}

int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
                                  struct perf_sample *sample)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->pid);

        if (thread == NULL)
                return -1;

        if (symbol_conf.comm_list &&
            !strlist__has_entry(symbol_conf.comm_list, thread->comm))
                goto out_filtered;

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
        /*
         * Have we already created the kernel maps for this machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
            machine->vmlinux_maps[MAP__FUNCTION] == NULL)
                machine__create_kernel_maps(machine);

        thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
                              sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;
        al->cpu = sample->cpu;

        if (al->map) {
                struct dso *dso = al->map->dso;

                if (symbol_conf.dso_list &&
                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
                                                   dso->long_name)))))
                        goto out_filtered;

                al->sym = map__find_symbol(al->map, al->addr,
                                           machine->symbol_filter);
        }

        if (symbol_conf.sym_list &&
                (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
                                                al->sym->name)))
                goto out_filtered;

        return 0;

out_filtered:
        al->filtered = true;
        return 0;
}