]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/util/event.c
b761b0eb60aff29c0c02c4e6366711c774e4b9e7
[karo-tx-linux.git] / tools / perf / util / event.c
1 #include <inttypes.h>
2 #include <linux/kernel.h>
3 #include <linux/types.h>
4 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
5 #include <api/fs/fs.h>
6 #include "event.h"
7 #include "debug.h"
8 #include "hist.h"
9 #include "machine.h"
10 #include "sort.h"
11 #include "string.h"
12 #include "strlist.h"
13 #include "thread.h"
14 #include "thread_map.h"
15 #include "symbol/kallsyms.h"
16 #include "asm/bug.h"
17 #include "stat.h"
18
/*
 * Printable names for perf event record types, indexed by PERF_RECORD_* id.
 * Index 0 is used for the aggregate "TOTAL" count.  Gaps in the id space
 * stay NULL; perf_event__name() maps those to "UNKNOWN".
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};
54
/*
 * Short names of the namespace types, indexed by *_NS_INDEX; these are
 * also the entry names under /proc/<pid>/ns/ used by
 * perf_event__get_ns_link_info().
 */
static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};
64
65 const char *perf_event__name(unsigned int id)
66 {
67         if (id >= ARRAY_SIZE(perf_event__names))
68                 return "INVALID";
69         if (!perf_event__names[id])
70                 return "UNKNOWN";
71         return perf_event__names[id];
72 }
73
74 static const char *perf_ns__name(unsigned int id)
75 {
76         if (id >= ARRAY_SIZE(perf_ns__names))
77                 return "UNKNOWN";
78         return perf_ns__names[id];
79 }
80
81 static int perf_tool__process_synth_event(struct perf_tool *tool,
82                                           union perf_event *event,
83                                           struct machine *machine,
84                                           perf_event__handler_t process)
85 {
86         struct perf_sample synth_sample = {
87         .pid       = -1,
88         .tid       = -1,
89         .time      = -1,
90         .stream_id = -1,
91         .cpu       = -1,
92         .period    = 1,
93         .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
94         };
95
96         return process(tool, event, &synth_sample, machine);
97 };
98
99 /*
100  * Assumes that the first 4095 bytes of /proc/pid/stat contains
101  * the comm, tgid and ppid.
102  */
103 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
104                                     pid_t *tgid, pid_t *ppid)
105 {
106         char filename[PATH_MAX];
107         char bf[4096];
108         int fd;
109         size_t size = 0;
110         ssize_t n;
111         char *name, *tgids, *ppids;
112
113         *tgid = -1;
114         *ppid = -1;
115
116         snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
117
118         fd = open(filename, O_RDONLY);
119         if (fd < 0) {
120                 pr_debug("couldn't open %s\n", filename);
121                 return -1;
122         }
123
124         n = read(fd, bf, sizeof(bf) - 1);
125         close(fd);
126         if (n <= 0) {
127                 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
128                            pid);
129                 return -1;
130         }
131         bf[n] = '\0';
132
133         name = strstr(bf, "Name:");
134         tgids = strstr(bf, "Tgid:");
135         ppids = strstr(bf, "PPid:");
136
137         if (name) {
138                 name += 5;  /* strlen("Name:") */
139                 name = rtrim(ltrim(name));
140                 size = strlen(name);
141                 if (size >= len)
142                         size = len - 1;
143                 memcpy(comm, name, size);
144                 comm[size] = '\0';
145         } else {
146                 pr_debug("Name: string not found for pid %d\n", pid);
147         }
148
149         if (tgids) {
150                 tgids += 5;  /* strlen("Tgid:") */
151                 *tgid = atoi(tgids);
152         } else {
153                 pr_debug("Tgid: string not found for pid %d\n", pid);
154         }
155
156         if (ppids) {
157                 ppids += 5;  /* strlen("PPid:") */
158                 *ppid = atoi(ppids);
159         } else {
160                 pr_debug("PPid: string not found for pid %d\n", pid);
161         }
162
163         return 0;
164 }
165
/*
 * Fill in a PERF_RECORD_COMM event for @pid without emitting it.
 *
 * On the host, comm/tgid/ppid come from /proc/<pid>/status; for a guest
 * machine the comm stays empty (zeroed), *tgid is the guest machine's pid
 * and *ppid remains -1.  Returns 0 on success, -1 if the ids could not be
 * resolved.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * Shrink the event to the u64-aligned comm string plus the sample id
	 * area, zeroing the id area so no stale bytes leak into the stream.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}
202
203 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
204                                          union perf_event *event, pid_t pid,
205                                          perf_event__handler_t process,
206                                          struct machine *machine)
207 {
208         pid_t tgid, ppid;
209
210         if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
211                 return -1;
212
213         if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
214                 return -1;
215
216         return tgid;
217 }
218
219 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
220                                          struct perf_ns_link_info *ns_link_info)
221 {
222         struct stat64 st;
223         char proc_ns[128];
224
225         sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
226         if (stat64(proc_ns, &st) == 0) {
227                 ns_link_info->dev = st.st_dev;
228                 ns_link_info->ino = st.st_ino;
229         }
230 }
231
/*
 * Synthesize a PERF_RECORD_NAMESPACES event for @pid/@tgid, carrying one
 * perf_ns_link_info (dev/ino from /proc/<pid>/ns/<name>) per namespace
 * type, and feed it to @process.
 *
 * A no-op returning 0 unless the tool requested namespace events.
 * Returns 0 on success, -1 if @process failed.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	/* Zero the whole payload: stat failures leave entries all-zero. */
	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
270
/*
 * Synthesize a PERF_RECORD_FORK event for thread @pid of process @tgid
 * (parent @ppid) and feed it to @process.  Returns 0 on success, -1 if
 * @process failed.
 */
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * for main thread set parent to ppid from status file. For other
	 * threads set parent pid to main thread. ie., assume main thread
	 * spawns all threads in a process
	*/
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
302
303 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
304                                        union perf_event *event,
305                                        pid_t pid, pid_t tgid,
306                                        perf_event__handler_t process,
307                                        struct machine *machine,
308                                        bool mmap_data,
309                                        unsigned int proc_map_timeout)
310 {
311         char filename[PATH_MAX];
312         FILE *fp;
313         unsigned long long t;
314         bool truncation = false;
315         unsigned long long timeout = proc_map_timeout * 1000000ULL;
316         int rc = 0;
317         const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
318         int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
319
320         if (machine__is_default_guest(machine))
321                 return 0;
322
323         snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
324                  machine->root_dir, pid, pid);
325
326         fp = fopen(filename, "r");
327         if (fp == NULL) {
328                 /*
329                  * We raced with a task exiting - just return:
330                  */
331                 pr_debug("couldn't open %s\n", filename);
332                 return -1;
333         }
334
335         event->header.type = PERF_RECORD_MMAP2;
336         t = rdclock();
337
338         while (1) {
339                 char bf[BUFSIZ];
340                 char prot[5];
341                 char execname[PATH_MAX];
342                 char anonstr[] = "//anon";
343                 unsigned int ino;
344                 size_t size;
345                 ssize_t n;
346
347                 if (fgets(bf, sizeof(bf), fp) == NULL)
348                         break;
349
350                 if ((rdclock() - t) > timeout) {
351                         pr_warning("Reading %s time out. "
352                                    "You may want to increase "
353                                    "the time limit by --proc-map-timeout\n",
354                                    filename);
355                         truncation = true;
356                         goto out;
357                 }
358
359                 /* ensure null termination since stack will be reused. */
360                 strcpy(execname, "");
361
362                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
363                 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
364                        &event->mmap2.start, &event->mmap2.len, prot,
365                        &event->mmap2.pgoff, &event->mmap2.maj,
366                        &event->mmap2.min,
367                        &ino, execname);
368
369                 /*
370                  * Anon maps don't have the execname.
371                  */
372                 if (n < 7)
373                         continue;
374
375                 event->mmap2.ino = (u64)ino;
376
377                 /*
378                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
379                  */
380                 if (machine__is_host(machine))
381                         event->header.misc = PERF_RECORD_MISC_USER;
382                 else
383                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
384
385                 /* map protection and flags bits */
386                 event->mmap2.prot = 0;
387                 event->mmap2.flags = 0;
388                 if (prot[0] == 'r')
389                         event->mmap2.prot |= PROT_READ;
390                 if (prot[1] == 'w')
391                         event->mmap2.prot |= PROT_WRITE;
392                 if (prot[2] == 'x')
393                         event->mmap2.prot |= PROT_EXEC;
394
395                 if (prot[3] == 's')
396                         event->mmap2.flags |= MAP_SHARED;
397                 else
398                         event->mmap2.flags |= MAP_PRIVATE;
399
400                 if (prot[2] != 'x') {
401                         if (!mmap_data || prot[0] != 'r')
402                                 continue;
403
404                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
405                 }
406
407 out:
408                 if (truncation)
409                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
410
411                 if (!strcmp(execname, ""))
412                         strcpy(execname, anonstr);
413
414                 if (hugetlbfs_mnt_len &&
415                     !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
416                         strcpy(execname, anonstr);
417                         event->mmap2.flags |= MAP_HUGETLB;
418                 }
419
420                 size = strlen(execname) + 1;
421                 memcpy(event->mmap2.filename, execname, size);
422                 size = PERF_ALIGN(size, sizeof(u64));
423                 event->mmap2.len -= event->mmap.start;
424                 event->mmap2.header.size = (sizeof(event->mmap2) -
425                                         (sizeof(event->mmap2.filename) - size));
426                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
427                 event->mmap2.header.size += machine->id_hdr_size;
428                 event->mmap2.pid = tgid;
429                 event->mmap2.tid = pid;
430
431                 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
432                         rc = -1;
433                         break;
434                 }
435
436                 if (truncation)
437                         break;
438         }
439
440         fclose(fp);
441         return rc;
442 }
443
/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module map (the kernel
 * map itself is skipped) and feed each to @process.  Returns 0 on success,
 * -1 on allocation or @process failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	/* One event buffer, reused for every module. */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		/* Size the event to the aligned dso name + sample id area. */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
498
/*
 * Synthesize the events describing one thread or a whole process.
 *
 * With @full == 0 only @pid itself is covered: COMM, NAMESPACES and its
 * MMAP2 events.  Otherwise every task under /proc/<pid>/task gets COMM,
 * FORK and NAMESPACES events, and the maps of @pid (the group leader) are
 * synthesized too.  The four *_event buffers are caller-allocated scratch
 * space, reused per task.  Returns 0 on success, -1 on failure.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;


		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		/* Task probably exited; not an error for the caller. */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		/* skip ".", ".." and anything non-numeric */
		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		/* rc stays -1 if any of the next steps breaks out early */
		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
586
/*
 * Synthesize COMM/NAMESPACES/MMAP2 events for every thread in @threads.
 * If a thread's group leader is not in the map, events for the leader are
 * synthesized as well so the thread can be attributed to its process.
 * Returns 0 on success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	/* Scratch event buffers, shared across all threads below. */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
664
/*
 * Walk <root_dir>/proc and synthesize full event sets (COMM, FORK,
 * NAMESPACES, MMAP2) for every process found.  Failures on individual
 * processes are tolerated — we may race with threads exiting.  Returns
 * 0 on success, -1 on allocation failure or if /proc can't be opened.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	/* Scratch event buffers, reused for every process. */
	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_namespaces;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_namespaces:
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
734
/* Callback state shared between kallsyms__get_function_start() and
 * find_symbol_cb(). */
struct process_symbol_args {
	const char *name;	/* symbol to search for */
	u64        start;	/* out: address of the matching symbol */
};
739
740 static int find_symbol_cb(void *arg, const char *name, char type,
741                           u64 start)
742 {
743         struct process_symbol_args *args = arg;
744
745         /*
746          * Must be a function or at least an alias, as in PARISC64, where "_text" is
747          * an 'A' to the same address as "_stext".
748          */
749         if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
750               type == 'A') || strcmp(name, args->name))
751                 return 0;
752
753         args->start = start;
754         return 1;
755 }
756
757 u64 kallsyms__get_function_start(const char *kallsyms_filename,
758                                  const char *symbol_name)
759 {
760         struct process_symbol_args args = { .name = symbol_name, };
761
762         if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
763                 return 0;
764
765         return args.start;
766 }
767
768 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
769                                        perf_event__handler_t process,
770                                        struct machine *machine)
771 {
772         size_t size;
773         const char *mmap_name;
774         char name_buff[PATH_MAX];
775         struct map *map = machine__kernel_map(machine);
776         struct kmap *kmap;
777         int err;
778         union perf_event *event;
779
780         if (symbol_conf.kptr_restrict)
781                 return -1;
782         if (map == NULL)
783                 return -1;
784
785         /*
786          * We should get this from /sys/kernel/sections/.text, but till that is
787          * available use this, and after it is use this as a fallback for older
788          * kernels.
789          */
790         event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
791         if (event == NULL) {
792                 pr_debug("Not enough memory synthesizing mmap event "
793                          "for kernel modules\n");
794                 return -1;
795         }
796
797         mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
798         if (machine__is_host(machine)) {
799                 /*
800                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
801                  * see kernel/perf_event.c __perf_event_mmap
802                  */
803                 event->header.misc = PERF_RECORD_MISC_KERNEL;
804         } else {
805                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
806         }
807
808         kmap = map__kmap(map);
809         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
810                         "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
811         size = PERF_ALIGN(size, sizeof(u64));
812         event->mmap.header.type = PERF_RECORD_MMAP;
813         event->mmap.header.size = (sizeof(event->mmap) -
814                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
815         event->mmap.pgoff = kmap->ref_reloc_sym->addr;
816         event->mmap.start = map->start;
817         event->mmap.len   = map->end - event->mmap.start;
818         event->mmap.pid   = machine->pid;
819
820         err = perf_tool__process_synth_event(tool, event, machine, process);
821         free(event);
822
823         return err;
824 }
825
826 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
827                                       struct thread_map *threads,
828                                       perf_event__handler_t process,
829                                       struct machine *machine)
830 {
831         union perf_event *event;
832         int i, err, size;
833
834         size  = sizeof(event->thread_map);
835         size += threads->nr * sizeof(event->thread_map.entries[0]);
836
837         event = zalloc(size);
838         if (!event)
839                 return -ENOMEM;
840
841         event->header.type = PERF_RECORD_THREAD_MAP;
842         event->header.size = size;
843         event->thread_map.nr = threads->nr;
844
845         for (i = 0; i < threads->nr; i++) {
846                 struct thread_map_event_entry *entry = &event->thread_map.entries[i];
847                 char *comm = thread_map__comm(threads, i);
848
849                 if (!comm)
850                         comm = (char *) "";
851
852                 entry->pid = thread_map__pid(threads, i);
853                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
854         }
855
856         err = process(tool, event, NULL, machine);
857
858         free(event);
859         return err;
860 }
861
862 static void synthesize_cpus(struct cpu_map_entries *cpus,
863                             struct cpu_map *map)
864 {
865         int i;
866
867         cpus->nr = map->nr;
868
869         for (i = 0; i < map->nr; i++)
870                 cpus->cpu[i] = map->map[i];
871 }
872
873 static void synthesize_mask(struct cpu_map_mask *mask,
874                             struct cpu_map *map, int max)
875 {
876         int i;
877
878         mask->nr = BITS_TO_LONGS(max);
879         mask->long_size = sizeof(long);
880
881         for (i = 0; i < map->nr; i++)
882                 set_bit(map->map[i], mask->mask);
883 }
884
885 static size_t cpus_size(struct cpu_map *map)
886 {
887         return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
888 }
889
890 static size_t mask_size(struct cpu_map *map, int *max)
891 {
892         int i;
893
894         *max = 0;
895
896         for (i = 0; i < map->nr; i++) {
897                 /* bit possition of the cpu is + 1 */
898                 int bit = map->map[i] + 1;
899
900                 if (bit > *max)
901                         *max = bit;
902         }
903
904         return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
905 }
906
907 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
908 {
909         size_t size_cpus, size_mask;
910         bool is_dummy = cpu_map__empty(map);
911
912         /*
913          * Both array and mask data have variable size based
914          * on the number of cpus and their actual values.
915          * The size of the 'struct cpu_map_data' is:
916          *
917          *   array = size of 'struct cpu_map_entries' +
918          *           number of cpus * sizeof(u64)
919          *
920          *   mask  = size of 'struct cpu_map_mask' +
921          *           maximum cpu bit converted to size of longs
922          *
923          * and finaly + the size of 'struct cpu_map_data'.
924          */
925         size_cpus = cpus_size(map);
926         size_mask = mask_size(map, max);
927
928         if (is_dummy || (size_cpus < size_mask)) {
929                 *size += size_cpus;
930                 *type  = PERF_CPU_MAP__CPUS;
931         } else {
932                 *size += size_mask;
933                 *type  = PERF_CPU_MAP__MASK;
934         }
935
936         *size += sizeof(struct cpu_map_data);
937         return zalloc(*size);
938 }
939
940 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
941                               u16 type, int max)
942 {
943         data->type = type;
944
945         switch (type) {
946         case PERF_CPU_MAP__CPUS:
947                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
948                 break;
949         case PERF_CPU_MAP__MASK:
950                 synthesize_mask((struct cpu_map_mask *) data->data, map, max);
951         default:
952                 break;
953         };
954 }
955
956 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
957 {
958         size_t size = sizeof(struct cpu_map_event);
959         struct cpu_map_event *event;
960         int max;
961         u16 type;
962
963         event = cpu_map_data__alloc(map, &size, &type, &max);
964         if (!event)
965                 return NULL;
966
967         event->header.type = PERF_RECORD_CPU_MAP;
968         event->header.size = size;
969         event->data.type   = type;
970
971         cpu_map_data__synthesize(&event->data, map, type, max);
972         return event;
973 }
974
975 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
976                                    struct cpu_map *map,
977                                    perf_event__handler_t process,
978                                    struct machine *machine)
979 {
980         struct cpu_map_event *event;
981         int err;
982
983         event = cpu_map_event__new(map);
984         if (!event)
985                 return -ENOMEM;
986
987         err = process(tool, (union perf_event *) event, NULL, machine);
988
989         free(event);
990         return err;
991 }
992
993 int perf_event__synthesize_stat_config(struct perf_tool *tool,
994                                        struct perf_stat_config *config,
995                                        perf_event__handler_t process,
996                                        struct machine *machine)
997 {
998         struct stat_config_event *event;
999         int size, i = 0, err;
1000
1001         size  = sizeof(*event);
1002         size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1003
1004         event = zalloc(size);
1005         if (!event)
1006                 return -ENOMEM;
1007
1008         event->header.type = PERF_RECORD_STAT_CONFIG;
1009         event->header.size = size;
1010         event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1011
1012 #define ADD(__term, __val)                                      \
1013         event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
1014         event->data[i].val = __val;                             \
1015         i++;
1016
1017         ADD(AGGR_MODE,  config->aggr_mode)
1018         ADD(INTERVAL,   config->interval)
1019         ADD(SCALE,      config->scale)
1020
1021         WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1022                   "stat config terms unbalanced\n");
1023 #undef ADD
1024
1025         err = process(tool, (union perf_event *) event, NULL, machine);
1026
1027         free(event);
1028         return err;
1029 }
1030
1031 int perf_event__synthesize_stat(struct perf_tool *tool,
1032                                 u32 cpu, u32 thread, u64 id,
1033                                 struct perf_counts_values *count,
1034                                 perf_event__handler_t process,
1035                                 struct machine *machine)
1036 {
1037         struct stat_event event;
1038
1039         event.header.type = PERF_RECORD_STAT;
1040         event.header.size = sizeof(event);
1041         event.header.misc = 0;
1042
1043         event.id        = id;
1044         event.cpu       = cpu;
1045         event.thread    = thread;
1046         event.val       = count->val;
1047         event.ena       = count->ena;
1048         event.run       = count->run;
1049
1050         return process(tool, (union perf_event *) &event, NULL, machine);
1051 }
1052
1053 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1054                                       u64 evtime, u64 type,
1055                                       perf_event__handler_t process,
1056                                       struct machine *machine)
1057 {
1058         struct stat_round_event event;
1059
1060         event.header.type = PERF_RECORD_STAT_ROUND;
1061         event.header.size = sizeof(event);
1062         event.header.misc = 0;
1063
1064         event.time = evtime;
1065         event.type = type;
1066
1067         return process(tool, (union perf_event *) &event, NULL, machine);
1068 }
1069
1070 void perf_event__read_stat_config(struct perf_stat_config *config,
1071                                   struct stat_config_event *event)
1072 {
1073         unsigned i;
1074
1075         for (i = 0; i < event->nr; i++) {
1076
1077                 switch (event->data[i].tag) {
1078 #define CASE(__term, __val)                                     \
1079                 case PERF_STAT_CONFIG_TERM__##__term:           \
1080                         config->__val = event->data[i].val;     \
1081                         break;
1082
1083                 CASE(AGGR_MODE, aggr_mode)
1084                 CASE(SCALE,     scale)
1085                 CASE(INTERVAL,  interval)
1086 #undef CASE
1087                 default:
1088                         pr_warning("unknown stat config term %" PRIu64 "\n",
1089                                    event->data[i].tag);
1090                 }
1091         }
1092 }
1093
1094 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1095 {
1096         const char *s;
1097
1098         if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1099                 s = " exec";
1100         else
1101                 s = "";
1102
1103         return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1104 }
1105
1106 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1107 {
1108         size_t ret = 0;
1109         struct perf_ns_link_info *ns_link_info;
1110         u32 nr_namespaces, idx;
1111
1112         ns_link_info = event->namespaces.link_info;
1113         nr_namespaces = event->namespaces.nr_namespaces;
1114
1115         ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1116                        event->namespaces.pid,
1117                        event->namespaces.tid,
1118                        nr_namespaces);
1119
1120         for (idx = 0; idx < nr_namespaces; idx++) {
1121                 if (idx && (idx % 4 == 0))
1122                         ret += fprintf(fp, "\n\t\t ");
1123
1124                 ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1125                                 perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1126                                 (u64)ns_link_info[idx].ino,
1127                                 ((idx + 1) != nr_namespaces) ? ", " : "]\n");
1128         }
1129
1130         return ret;
1131 }
1132
/* Tool callback for PERF_RECORD_COMM: delegate to machine__process_comm_event(). */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_comm_event(machine, event, sample);
}
1140
/* Tool callback for PERF_RECORD_NAMESPACES: delegate to the machine layer. */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine)
{
        return machine__process_namespaces_event(machine, event, sample);
}
1148
/* Tool callback for PERF_RECORD_LOST: delegate to machine__process_lost_event(). */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_lost_event(machine, event, sample);
}
1156
/* Tool callback for PERF_RECORD_AUX: delegate to machine__process_aux_event(). */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
                            union perf_event *event,
                            struct perf_sample *sample __maybe_unused,
                            struct machine *machine)
{
        return machine__process_aux_event(machine, event);
}
1164
/* Tool callback for PERF_RECORD_ITRACE_START: delegate to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine)
{
        return machine__process_itrace_start_event(machine, event);
}
1172
/* Tool callback for PERF_RECORD_LOST_SAMPLES: delegate to the machine layer. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine)
{
        return machine__process_lost_samples_event(machine, event, sample);
}
1180
/* Tool callback for PERF_RECORD_SWITCH[_CPU_WIDE]: delegate to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine)
{
        return machine__process_switch_event(machine, event);
}
1188
1189 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1190 {
1191         return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1192                        event->mmap.pid, event->mmap.tid, event->mmap.start,
1193                        event->mmap.len, event->mmap.pgoff,
1194                        (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1195                        event->mmap.filename);
1196 }
1197
1198 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1199 {
1200         return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1201                            " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1202                        event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1203                        event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1204                        event->mmap2.min, event->mmap2.ino,
1205                        event->mmap2.ino_generation,
1206                        (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1207                        (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1208                        (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1209                        (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1210                        event->mmap2.filename);
1211 }
1212
1213 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1214 {
1215         struct thread_map *threads = thread_map__new_event(&event->thread_map);
1216         size_t ret;
1217
1218         ret = fprintf(fp, " nr: ");
1219
1220         if (threads)
1221                 ret += thread_map__fprintf(threads, fp);
1222         else
1223                 ret += fprintf(fp, "failed to get threads from event\n");
1224
1225         thread_map__put(threads);
1226         return ret;
1227 }
1228
1229 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1230 {
1231         struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1232         size_t ret;
1233
1234         ret = fprintf(fp, ": ");
1235
1236         if (cpus)
1237                 ret += cpu_map__fprintf(cpus, fp);
1238         else
1239                 ret += fprintf(fp, "failed to get cpumap from event\n");
1240
1241         cpu_map__put(cpus);
1242         return ret;
1243 }
1244
/* Tool callback for PERF_RECORD_MMAP: delegate to machine__process_mmap_event(). */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_mmap_event(machine, event, sample);
}
1252
/* Tool callback for PERF_RECORD_MMAP2: delegate to machine__process_mmap2_event(). */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_mmap2_event(machine, event, sample);
}
1260
/* Print FORK/EXIT events as "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
        return fprintf(fp, "(%d:%d):(%d:%d)\n",
                       event->fork.pid, event->fork.tid,
                       event->fork.ppid, event->fork.ptid);
}
1267
/* Tool callback for PERF_RECORD_FORK: delegate to machine__process_fork_event(). */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_fork_event(machine, event, sample);
}
1275
/* Tool callback for PERF_RECORD_EXIT: delegate to machine__process_exit_event(). */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine)
{
        return machine__process_exit_event(machine, event, sample);
}
1283
1284 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1285 {
1286         return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1287                        event->aux.aux_offset, event->aux.aux_size,
1288                        event->aux.flags,
1289                        event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1290                        event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
1291                        event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1292 }
1293
/* Print ITRACE_START events as "pid: <pid> tid: <tid>". */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
        return fprintf(fp, " pid: %u tid: %u\n",
                       event->itrace_start.pid, event->itrace_start.tid);
}
1299
1300 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1301 {
1302         bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1303         const char *in_out = out ? "OUT" : "IN ";
1304
1305         if (event->header.type == PERF_RECORD_SWITCH)
1306                 return fprintf(fp, " %s\n", in_out);
1307
1308         return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1309                        in_out, out ? "next" : "prev",
1310                        event->context_switch.next_prev_pid,
1311                        event->context_switch.next_prev_tid);
1312 }
1313
/*
 * Print "PERF_RECORD_<name>" followed by a per-type detail line, returning
 * the number of characters written.  Types without a dedicated printer just
 * get a trailing newline.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
                             perf_event__name(event->header.type));

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret += perf_event__fprintf_comm(event, fp);
                break;
        case PERF_RECORD_FORK:
        case PERF_RECORD_EXIT:
                ret += perf_event__fprintf_task(event, fp);
                break;
        case PERF_RECORD_MMAP:
                ret += perf_event__fprintf_mmap(event, fp);
                break;
        case PERF_RECORD_NAMESPACES:
                ret += perf_event__fprintf_namespaces(event, fp);
                break;
        case PERF_RECORD_MMAP2:
                ret += perf_event__fprintf_mmap2(event, fp);
                break;
        case PERF_RECORD_AUX:
                ret += perf_event__fprintf_aux(event, fp);
                break;
        case PERF_RECORD_ITRACE_START:
                ret += perf_event__fprintf_itrace_start(event, fp);
                break;
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                ret += perf_event__fprintf_switch(event, fp);
                break;
        default:
                ret += fprintf(fp, "\n");
        }

        return ret;
}
1352
/* Generic tool callback: delegate any event to machine__process_event(). */
int perf_event__process(struct perf_tool *tool __maybe_unused,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine)
{
        return machine__process_event(machine, event, sample);
}
1360
/*
 * Resolve 'addr' in 'thread''s address space, filling in *al.
 *
 * The cpumode (kernel/user, host/guest) selects which map groups are
 * searched and sets al->level to 'k', '.', 'g', 'u' or 'H'.  Samples whose
 * cpumode does not match the perf_host/perf_guest settings get al->map set
 * to NULL and the corresponding HIST_FILTER bits set in al->filtered.  When
 * a map is found, al->addr is translated to a map-relative address.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
                           enum map_type type, u64 addr,
                           struct addr_location *al)
{
        struct map_groups *mg = thread->mg;
        struct machine *machine = mg->machine;
        bool load_map = false;

        al->machine = machine;
        al->thread = thread;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = 0;

        if (machine == NULL) {
                al->map = NULL;
                return;
        }

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                mg = &machine->kmaps;
                load_map = true;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                mg = &machine->kmaps;
                load_map = true;
        } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
                al->level = 'u';
        } else {
                /* cpumode not handled in this session: mark as filtered. */
                al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                        cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                        !perf_guest)
                        al->filtered |= (1 << HIST_FILTER__GUEST);
                if ((cpumode == PERF_RECORD_MISC_USER ||
                        cpumode == PERF_RECORD_MISC_KERNEL) &&
                        !perf_host)
                        al->filtered |= (1 << HIST_FILTER__HOST);

                return;
        }
try_again:
        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now lets use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if (cpumode == PERF_RECORD_MISC_USER && machine &&
                    mg != &machine->kmaps &&
                    machine__kernel_ip(machine, al->addr)) {
                        mg = &machine->kmaps;
                        load_map = true;
                        goto try_again;
                }
        } else {
                /*
                 * Kernel maps might be changed when loading symbols so loading
                 * must be done prior to using kernel maps.
                 */
                if (load_map)
                        map__load(al->map);
                al->addr = al->map->map_ip(al->map, al->addr);
        }
}
1436
1437 void thread__find_addr_location(struct thread *thread,
1438                                 u8 cpumode, enum map_type type, u64 addr,
1439                                 struct addr_location *al)
1440 {
1441         thread__find_addr_map(thread, cpumode, type, addr, al);
1442         if (al->map != NULL)
1443                 al->sym = map__find_symbol(al->map, al->addr);
1444         else
1445                 al->sym = NULL;
1446 }
1447
/*
 * Resolve a sample to a thread, map and symbol, filling in *al.
 *
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread(); pair with addr_location__put().
 *
 * Returns 0 on success, -1 if the thread cannot be found or created.
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample)
{
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);

        if (thread == NULL)
                return -1;

        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
        /*
         * Have we already created the kernel maps for this machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
            machine__kernel_map(machine) == NULL)
                machine__create_kernel_maps(machine);

        thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");

        if (thread__is_filtered(thread))
                al->filtered |= (1 << HIST_FILTER__THREAD);

        al->sym = NULL;
        al->cpu = sample->cpu;
        al->socket = -1;

        /* Map the sample's cpu to its socket when topology info is present. */
        if (al->cpu >= 0) {
                struct perf_env *env = machine->env;

                if (env && env->cpu)
                        al->socket = env->cpu[al->cpu].socket_id;
        }

        if (al->map) {
                struct dso *dso = al->map->dso;

                /* Apply the dso_list filter: match either short or long name. */
                if (symbol_conf.dso_list &&
                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
                                                   dso->long_name))))) {
                        al->filtered |= (1 << HIST_FILTER__DSO);
                }

                al->sym = map__find_symbol(al->map, al->addr);
        }

        /* Apply the sym_list filter against the resolved symbol name. */
        if (symbol_conf.sym_list &&
                (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
                                                al->sym->name))) {
                al->filtered |= (1 << HIST_FILTER__SYMBOL);
        }

        return 0;
}
1515
/*
 * machine__resolve() returns with a reference count held on the thread in
 * the addr_location it fills in.  When done using it (and perhaps after
 * getting extra references to keep a pointer to one of those entries), it
 * must be paired with addr_location__put(), so that the refcounts can be
 * decremented.
 */
void addr_location__put(struct addr_location *al)
{
        thread__zput(al->thread);
}
1526
/*
 * Intel BTS is modelled as a hardware branch-instructions event with a
 * sample period of 1.
 *
 * NOTE(review): the config check uses bitwise AND rather than '==', so any
 * config value with the PERF_COUNT_HW_BRANCH_INSTRUCTIONS bit set matches,
 * not only branch-instructions itself -- confirm whether an exact
 * comparison was intended.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
        return attr->type == PERF_TYPE_HARDWARE &&
               (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
               attr->sample_period == 1;
}
1533
1534 bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1535 {
1536         if (attr->type == PERF_TYPE_SOFTWARE &&
1537             (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1538              attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1539              attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1540                 return true;
1541
1542         if (is_bts_event(attr))
1543                 return true;
1544
1545         return false;
1546 }
1547
1548 void thread__resolve(struct thread *thread, struct addr_location *al,
1549                      struct perf_sample *sample)
1550 {
1551         thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1552         if (!al->map)
1553                 thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1554                                       sample->addr, al);
1555
1556         al->cpu = sample->cpu;
1557         al->sym = NULL;
1558
1559         if (al->map)
1560                 al->sym = map__find_symbol(al->map, al->addr);
1561 }