]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/util/event.c
1ea693c2a14b70a2d0508ec2a2641dddc105c6b9
[karo-tx-linux.git] / tools / perf / util / event.c
1 #include <linux/types.h>
2 #include <sys/mman.h>
3 #include "event.h"
4 #include "debug.h"
5 #include "hist.h"
6 #include "machine.h"
7 #include "sort.h"
8 #include "string.h"
9 #include "strlist.h"
10 #include "thread.h"
11 #include "thread_map.h"
12 #include "symbol/kallsyms.h"
13 #include "asm/bug.h"
14 #include "stat.h"
15
/*
 * Printable names for the perf event record types, indexed by their
 * PERF_RECORD_* id.  Slot 0 holds the aggregate "TOTAL" counter name.
 * Ids with no entry stay NULL and are reported as "UNKNOWN" by
 * perf_event__name().
 */
static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
};
46
47 const char *perf_event__name(unsigned int id)
48 {
49         if (id >= ARRAY_SIZE(perf_event__names))
50                 return "INVALID";
51         if (!perf_event__names[id])
52                 return "UNKNOWN";
53         return perf_event__names[id];
54 }
55
/*
 * Template sample attached to every synthesized event: all identifiers
 * are -1 ("not available") and the period is 1 so each synthesized
 * record counts exactly once.
 */
static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};
64
65 /*
66  * Assumes that the first 4095 bytes of /proc/pid/stat contains
67  * the comm, tgid and ppid.
68  */
69 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
70                                     pid_t *tgid, pid_t *ppid)
71 {
72         char filename[PATH_MAX];
73         char bf[4096];
74         int fd;
75         size_t size = 0;
76         ssize_t n;
77         char *nl, *name, *tgids, *ppids;
78
79         *tgid = -1;
80         *ppid = -1;
81
82         snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
83
84         fd = open(filename, O_RDONLY);
85         if (fd < 0) {
86                 pr_debug("couldn't open %s\n", filename);
87                 return -1;
88         }
89
90         n = read(fd, bf, sizeof(bf) - 1);
91         close(fd);
92         if (n <= 0) {
93                 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
94                            pid);
95                 return -1;
96         }
97         bf[n] = '\0';
98
99         name = strstr(bf, "Name:");
100         tgids = strstr(bf, "Tgid:");
101         ppids = strstr(bf, "PPid:");
102
103         if (name) {
104                 name += 5;  /* strlen("Name:") */
105
106                 while (*name && isspace(*name))
107                         ++name;
108
109                 nl = strchr(name, '\n');
110                 if (nl)
111                         *nl = '\0';
112
113                 size = strlen(name);
114                 if (size >= len)
115                         size = len - 1;
116                 memcpy(comm, name, size);
117                 comm[size] = '\0';
118         } else {
119                 pr_debug("Name: string not found for pid %d\n", pid);
120         }
121
122         if (tgids) {
123                 tgids += 5;  /* strlen("Tgid:") */
124                 *tgid = atoi(tgids);
125         } else {
126                 pr_debug("Tgid: string not found for pid %d\n", pid);
127         }
128
129         if (ppids) {
130                 ppids += 5;  /* strlen("PPid:") */
131                 *ppid = atoi(ppids);
132         } else {
133                 pr_debug("PPid: string not found for pid %d\n", pid);
134         }
135
136         return 0;
137 }
138
/*
 * Fill @event with a PERF_RECORD_COMM for @pid, resolving @tgid and
 * @ppid along the way.  For a host machine the ids come from
 * /proc/<pid>/status; for a guest only the machine pid is known and
 * @ppid stays -1.  Returns 0 on success, -1 if the ids could not be
 * resolved.  The event is not sent here - the caller does that.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		/* Guest: no /proc view, attribute the comm to the guest pid. */
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	/*
	 * The on-disk record is trimmed to the u64-aligned comm length;
	 * zero the id_hdr_size tail that follows the (shortened) comm.
	 */
	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}
175
176 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
177                                          union perf_event *event, pid_t pid,
178                                          perf_event__handler_t process,
179                                          struct machine *machine)
180 {
181         pid_t tgid, ppid;
182
183         if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
184                 return -1;
185
186         if (process(tool, event, &synth_sample, machine) != 0)
187                 return -1;
188
189         return tgid;
190 }
191
192 static int perf_event__synthesize_fork(struct perf_tool *tool,
193                                        union perf_event *event,
194                                        pid_t pid, pid_t tgid, pid_t ppid,
195                                        perf_event__handler_t process,
196                                        struct machine *machine)
197 {
198         memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
199
200         /*
201          * for main thread set parent to ppid from status file. For other
202          * threads set parent pid to main thread. ie., assume main thread
203          * spawns all threads in a process
204         */
205         if (tgid == pid) {
206                 event->fork.ppid = ppid;
207                 event->fork.ptid = ppid;
208         } else {
209                 event->fork.ppid = tgid;
210                 event->fork.ptid = tgid;
211         }
212         event->fork.pid  = tgid;
213         event->fork.tid  = pid;
214         event->fork.header.type = PERF_RECORD_FORK;
215
216         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
217
218         if (process(tool, event, &synth_sample, machine) != 0)
219                 return -1;
220
221         return 0;
222 }
223
224 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
225                                        union perf_event *event,
226                                        pid_t pid, pid_t tgid,
227                                        perf_event__handler_t process,
228                                        struct machine *machine,
229                                        bool mmap_data,
230                                        unsigned int proc_map_timeout)
231 {
232         char filename[PATH_MAX];
233         FILE *fp;
234         unsigned long long t;
235         bool truncation = false;
236         unsigned long long timeout = proc_map_timeout * 1000000ULL;
237         int rc = 0;
238
239         if (machine__is_default_guest(machine))
240                 return 0;
241
242         snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
243                  machine->root_dir, pid);
244
245         fp = fopen(filename, "r");
246         if (fp == NULL) {
247                 /*
248                  * We raced with a task exiting - just return:
249                  */
250                 pr_debug("couldn't open %s\n", filename);
251                 return -1;
252         }
253
254         event->header.type = PERF_RECORD_MMAP2;
255         t = rdclock();
256
257         while (1) {
258                 char bf[BUFSIZ];
259                 char prot[5];
260                 char execname[PATH_MAX];
261                 char anonstr[] = "//anon";
262                 unsigned int ino;
263                 size_t size;
264                 ssize_t n;
265
266                 if (fgets(bf, sizeof(bf), fp) == NULL)
267                         break;
268
269                 if ((rdclock() - t) > timeout) {
270                         pr_warning("Reading %s time out. "
271                                    "You may want to increase "
272                                    "the time limit by --proc-map-timeout\n",
273                                    filename);
274                         truncation = true;
275                         goto out;
276                 }
277
278                 /* ensure null termination since stack will be reused. */
279                 strcpy(execname, "");
280
281                 /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
282                 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
283                        &event->mmap2.start, &event->mmap2.len, prot,
284                        &event->mmap2.pgoff, &event->mmap2.maj,
285                        &event->mmap2.min,
286                        &ino, execname);
287
288                 /*
289                  * Anon maps don't have the execname.
290                  */
291                 if (n < 7)
292                         continue;
293
294                 event->mmap2.ino = (u64)ino;
295
296                 /*
297                  * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
298                  */
299                 if (machine__is_host(machine))
300                         event->header.misc = PERF_RECORD_MISC_USER;
301                 else
302                         event->header.misc = PERF_RECORD_MISC_GUEST_USER;
303
304                 /* map protection and flags bits */
305                 event->mmap2.prot = 0;
306                 event->mmap2.flags = 0;
307                 if (prot[0] == 'r')
308                         event->mmap2.prot |= PROT_READ;
309                 if (prot[1] == 'w')
310                         event->mmap2.prot |= PROT_WRITE;
311                 if (prot[2] == 'x')
312                         event->mmap2.prot |= PROT_EXEC;
313
314                 if (prot[3] == 's')
315                         event->mmap2.flags |= MAP_SHARED;
316                 else
317                         event->mmap2.flags |= MAP_PRIVATE;
318
319                 if (prot[2] != 'x') {
320                         if (!mmap_data || prot[0] != 'r')
321                                 continue;
322
323                         event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
324                 }
325
326 out:
327                 if (truncation)
328                         event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
329
330                 if (!strcmp(execname, ""))
331                         strcpy(execname, anonstr);
332
333                 size = strlen(execname) + 1;
334                 memcpy(event->mmap2.filename, execname, size);
335                 size = PERF_ALIGN(size, sizeof(u64));
336                 event->mmap2.len -= event->mmap.start;
337                 event->mmap2.header.size = (sizeof(event->mmap2) -
338                                         (sizeof(event->mmap2.filename) - size));
339                 memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
340                 event->mmap2.header.size += machine->id_hdr_size;
341                 event->mmap2.pid = tgid;
342                 event->mmap2.tid = pid;
343
344                 if (process(tool, event, &synth_sample, machine) != 0) {
345                         rc = -1;
346                         break;
347                 }
348
349                 if (truncation)
350                         break;
351         }
352
353         fclose(fp);
354         return rc;
355 }
356
/*
 * Synthesize one PERF_RECORD_MMAP per kernel module currently mapped in
 * @machine's function maps (the kernel map itself is skipped) and
 * deliver each through @process.  Returns 0 on success, -1 on
 * allocation or handler failure.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		/* the kernel map is synthesized separately */
		if (__map__is_kernel(pos))
			continue;

		/* record size shrinks with the u64-aligned filename length */
		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
411
/*
 * Synthesize COMM/FORK/MMAP2 events for one thread (@full == 0) or for
 * every thread of the process (@full != 0) by walking
 * /proc/<pid>/task.  The caller supplies pre-sized scratch events.
 * Returns 0 on success, -1 if any synthesis step failed.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
					  perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	/* NOTE(review): readdir_r() is deprecated in glibc >= 2.24 —
	 * consider readdir() when the baseline allows it. */
	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid_t _pid;

		/* skip non-numeric entries such as "." and ".." */
		_pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		/* pessimistically mark failure until all steps succeed */
		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (process(tool, comm_event, &synth_sample, machine) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
488
/*
 * Synthesize COMM/MMAP2 events for every thread in @threads, plus - when
 * a thread's group leader is not itself in the map - for that leader, so
 * the resulting stream always names the thread group.  Returns 0 on
 * success, -1 on allocation or synthesis failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	/* success path falls through the cleanup ladder below */
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
557
/*
 * Synthesize COMM/FORK/MMAP2 events for every process found under
 * <root_dir>/proc.  Per-thread failures are deliberately ignored since
 * threads may exit while we walk /proc.  Returns 0 on success, -1 on
 * allocation or opendir failure.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	/* default guest has no /proc to walk */
	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
617
/* In/out argument for find_symbol_cb(): symbol @name in, address @start out. */
struct process_symbol_args {
	const char *name;
	u64	   start;
};
622
623 static int find_symbol_cb(void *arg, const char *name, char type,
624                           u64 start)
625 {
626         struct process_symbol_args *args = arg;
627
628         /*
629          * Must be a function or at least an alias, as in PARISC64, where "_text" is
630          * an 'A' to the same address as "_stext".
631          */
632         if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
633               type == 'A') || strcmp(name, args->name))
634                 return 0;
635
636         args->start = start;
637         return 1;
638 }
639
640 u64 kallsyms__get_function_start(const char *kallsyms_filename,
641                                  const char *symbol_name)
642 {
643         struct process_symbol_args args = { .name = symbol_name, };
644
645         if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
646                 return 0;
647
648         return args.start;
649 }
650
651 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
652                                        perf_event__handler_t process,
653                                        struct machine *machine)
654 {
655         size_t size;
656         const char *mmap_name;
657         char name_buff[PATH_MAX];
658         struct map *map = machine__kernel_map(machine);
659         struct kmap *kmap;
660         int err;
661         union perf_event *event;
662
663         if (map == NULL)
664                 return -1;
665
666         /*
667          * We should get this from /sys/kernel/sections/.text, but till that is
668          * available use this, and after it is use this as a fallback for older
669          * kernels.
670          */
671         event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
672         if (event == NULL) {
673                 pr_debug("Not enough memory synthesizing mmap event "
674                          "for kernel modules\n");
675                 return -1;
676         }
677
678         mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
679         if (machine__is_host(machine)) {
680                 /*
681                  * kernel uses PERF_RECORD_MISC_USER for user space maps,
682                  * see kernel/perf_event.c __perf_event_mmap
683                  */
684                 event->header.misc = PERF_RECORD_MISC_KERNEL;
685         } else {
686                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
687         }
688
689         kmap = map__kmap(map);
690         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
691                         "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
692         size = PERF_ALIGN(size, sizeof(u64));
693         event->mmap.header.type = PERF_RECORD_MMAP;
694         event->mmap.header.size = (sizeof(event->mmap) -
695                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
696         event->mmap.pgoff = kmap->ref_reloc_sym->addr;
697         event->mmap.start = map->start;
698         event->mmap.len   = map->end - event->mmap.start;
699         event->mmap.pid   = machine->pid;
700
701         err = process(tool, event, &synth_sample, machine);
702         free(event);
703
704         return err;
705 }
706
707 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
708                                       struct thread_map *threads,
709                                       perf_event__handler_t process,
710                                       struct machine *machine)
711 {
712         union perf_event *event;
713         int i, err, size;
714
715         size  = sizeof(event->thread_map);
716         size += threads->nr * sizeof(event->thread_map.entries[0]);
717
718         event = zalloc(size);
719         if (!event)
720                 return -ENOMEM;
721
722         event->header.type = PERF_RECORD_THREAD_MAP;
723         event->header.size = size;
724         event->thread_map.nr = threads->nr;
725
726         for (i = 0; i < threads->nr; i++) {
727                 struct thread_map_event_entry *entry = &event->thread_map.entries[i];
728                 char *comm = thread_map__comm(threads, i);
729
730                 if (!comm)
731                         comm = (char *) "";
732
733                 entry->pid = thread_map__pid(threads, i);
734                 strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
735         }
736
737         err = process(tool, event, NULL, machine);
738
739         free(event);
740         return err;
741 }
742
743 static void synthesize_cpus(struct cpu_map_entries *cpus,
744                             struct cpu_map *map)
745 {
746         int i;
747
748         cpus->nr = map->nr;
749
750         for (i = 0; i < map->nr; i++)
751                 cpus->cpu[i] = map->map[i];
752 }
753
754 static void synthesize_mask(struct cpu_map_mask *mask,
755                             struct cpu_map *map, int max)
756 {
757         int i;
758
759         mask->nr = BITS_TO_LONGS(max);
760         mask->long_size = sizeof(long);
761
762         for (i = 0; i < map->nr; i++)
763                 set_bit(map->map[i], mask->mask);
764 }
765
766 static size_t cpus_size(struct cpu_map *map)
767 {
768         return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
769 }
770
/*
 * Byte size of the bitmask-form cpu payload.  Also reports through
 * @max the highest bit position needed, which sizes the mask in longs.
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}
787
788 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
789 {
790         size_t size_cpus, size_mask;
791         bool is_dummy = cpu_map__empty(map);
792
793         /*
794          * Both array and mask data have variable size based
795          * on the number of cpus and their actual values.
796          * The size of the 'struct cpu_map_data' is:
797          *
798          *   array = size of 'struct cpu_map_entries' +
799          *           number of cpus * sizeof(u64)
800          *
801          *   mask  = size of 'struct cpu_map_mask' +
802          *           maximum cpu bit converted to size of longs
803          *
804          * and finaly + the size of 'struct cpu_map_data'.
805          */
806         size_cpus = cpus_size(map);
807         size_mask = mask_size(map, max);
808
809         if (is_dummy || (size_cpus < size_mask)) {
810                 *size += size_cpus;
811                 *type  = PERF_CPU_MAP__CPUS;
812         } else {
813                 *size += size_mask;
814                 *type  = PERF_CPU_MAP__MASK;
815         }
816
817         *size += sizeof(struct cpu_map_data);
818         return zalloc(*size);
819 }
820
821 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
822                               u16 type, int max)
823 {
824         data->type = type;
825
826         switch (type) {
827         case PERF_CPU_MAP__CPUS:
828                 synthesize_cpus((struct cpu_map_entries *) data->data, map);
829                 break;
830         case PERF_CPU_MAP__MASK:
831                 synthesize_mask((struct cpu_map_mask *) data->data, map, max);
832         default:
833                 break;
834         };
835 }
836
/*
 * Allocate and fill a PERF_RECORD_CPU_MAP event describing @map.
 * Returns NULL on allocation failure; the caller owns the event and
 * releases it with free().
 */
static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	/* size is grown in place by the payload cpu_map_data__alloc() picks */
	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	/* NOTE(review): cpu_map_data__synthesize() below sets this again */
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}
855
856 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
857                                    struct cpu_map *map,
858                                    perf_event__handler_t process,
859                                    struct machine *machine)
860 {
861         struct cpu_map_event *event;
862         int err;
863
864         event = cpu_map_event__new(map);
865         if (!event)
866                 return -ENOMEM;
867
868         err = process(tool, (union perf_event *) event, NULL, machine);
869
870         free(event);
871         return err;
872 }
873
874 int perf_event__synthesize_stat_config(struct perf_tool *tool,
875                                        struct perf_stat_config *config,
876                                        perf_event__handler_t process,
877                                        struct machine *machine)
878 {
879         struct stat_config_event *event;
880         int size, i = 0, err;
881
882         size  = sizeof(*event);
883         size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
884
885         event = zalloc(size);
886         if (!event)
887                 return -ENOMEM;
888
889         event->header.type = PERF_RECORD_STAT_CONFIG;
890         event->header.size = size;
891         event->nr          = PERF_STAT_CONFIG_TERM__MAX;
892
893 #define ADD(__term, __val)                                      \
894         event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
895         event->data[i].val = __val;                             \
896         i++;
897
898         ADD(AGGR_MODE,  config->aggr_mode)
899         ADD(INTERVAL,   config->interval)
900         ADD(SCALE,      config->scale)
901
902         WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
903                   "stat config terms unbalanced\n");
904 #undef ADD
905
906         err = process(tool, (union perf_event *) event, NULL, machine);
907
908         free(event);
909         return err;
910 }
911
912 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
913 {
914         const char *s;
915
916         if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
917                 s = " exec";
918         else
919                 s = "";
920
921         return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
922 }
923
/* perf_tool callback for PERF_RECORD_COMM: delegate to the machine layer. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}
931
/* perf_tool callback for PERF_RECORD_LOST: delegate to the machine layer. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
939
/* perf_tool callback for PERF_RECORD_AUX: delegate to the machine layer. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}
947
/* perf_tool callback for PERF_RECORD_ITRACE_START: delegate to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}
955
/* perf_tool callback for PERF_RECORD_LOST_SAMPLES: delegate to the machine layer. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}
963
/* perf_tool callback for PERF_RECORD_SWITCH{,_CPU_WIDE}: delegate to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}
971
/*
 * Print an MMAP event: pid/tid, [start(len) @ pgoff], 'r' for a data
 * mapping (PERF_RECORD_MISC_MMAP_DATA) or 'x' otherwise, and the file name.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}
980
/*
 * Print an MMAP2 event: like MMAP plus device maj:min, inode and inode
 * generation, with rwx protection flags and 's'hared/'p'rivate mapping.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}
995
996 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
997 {
998         struct thread_map *threads = thread_map__new_event(&event->thread_map);
999         size_t ret;
1000
1001         ret = fprintf(fp, " nr: ");
1002
1003         if (threads)
1004                 ret += thread_map__fprintf(threads, fp);
1005         else
1006                 ret += fprintf(fp, "failed to get threads from event\n");
1007
1008         thread_map__put(threads);
1009         return ret;
1010 }
1011
1012 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1013 {
1014         struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1015         size_t ret;
1016
1017         ret = fprintf(fp, " nr: ");
1018
1019         if (cpus)
1020                 ret += cpu_map__fprintf(cpus, fp);
1021         else
1022                 ret += fprintf(fp, "failed to get cpumap from event\n");
1023
1024         cpu_map__put(cpus);
1025         return ret;
1026 }
1027
/* perf_tool callback for PERF_RECORD_MMAP: delegate to the machine layer. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}
1035
/* perf_tool callback for PERF_RECORD_MMAP2: delegate to the machine layer. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}
1043
/* Print a FORK/EXIT event as "(pid:tid):(ppid:ptid)". */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}
1050
/* perf_tool callback for PERF_RECORD_FORK: delegate to the machine layer. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
1058
/* perf_tool callback for PERF_RECORD_EXIT: delegate to the machine layer. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
1066
/*
 * Print an AUX event: offset, size and flags, with 'T' for truncated
 * and 'O' for overwrite-mode data appended in brackets.
 */
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}
1075
/* Print an ITRACE_START event: the traced pid and tid. */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}
1081
1082 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1083 {
1084         bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1085         const char *in_out = out ? "OUT" : "IN ";
1086
1087         if (event->header.type == PERF_RECORD_SWITCH)
1088                 return fprintf(fp, " %s\n", in_out);
1089
1090         return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1091                        in_out, out ? "next" : "prev",
1092                        event->context_switch.next_prev_pid,
1093                        event->context_switch.next_prev_tid);
1094 }
1095
/*
 * Print a one-line human readable description of @event to @fp,
 * prefixed with "PERF_RECORD_<name>".  Event types without a dedicated
 * printer just get a newline.  Returns the number of characters printed.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}
1131
/* Default perf_tool callback: route any event type to the machine layer. */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
1139
/*
 * Resolve @addr for @thread into @al: pick the map groups matching
 * @cpumode (host/guest, kernel/user), look the address up there, and on
 * success convert al->addr to a map-relative address.  al->map is NULL
 * when nothing was found or the cpumode is filtered out (al->filtered
 * then records why).
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	/* Kernel-side addresses are looked up in the machine's kernel maps */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* cpumode doesn't match the host/guest mode we're running in */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}
1215
1216 void thread__find_addr_location(struct thread *thread,
1217                                 u8 cpumode, enum map_type type, u64 addr,
1218                                 struct addr_location *al)
1219 {
1220         thread__find_addr_map(thread, cpumode, type, addr, al);
1221         if (al->map != NULL)
1222                 al->sym = map__find_symbol(al->map, al->addr,
1223                                            thread->mg->machine->symbol_filter);
1224         else
1225                 al->sym = NULL;
1226 }
1227
/*
 * Resolve a sample into an addr_location: thread, map, symbol, cpu and
 * socket, applying the thread/dso/symbol filter lists as it goes.
 *
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread(), by pairing this with addr_location__put().
 * Returns 0 on success, -1 when the thread could not be found/created.
 */
int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	/* map the sample cpu to its socket id when topology info is present */
	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		/* filter out samples whose dso matches neither the short nor long name list */
		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	/* likewise for the symbol name filter list */
	if (symbol_conf.sym_list &&
		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
						al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
1299
/*
 * perf_event__preprocess_sample() returns with reference counts held on
 * the entries it fills in (notably al->thread).  When done using the
 * addr_location (and after taking extra refs on anything the caller
 * wants to keep), it must be paired with addr_location__put() so those
 * refcounts are dropped.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}
1310
/*
 * An Intel BTS session shows up as a hardware branch-instructions event
 * sampled with period 1.
 *
 * Fix vs. original: attr->config was tested with bitwise AND against
 * PERF_COUNT_HW_BRANCH_INSTRUCTIONS (value 4).  config is an event id,
 * not a bitmask, so ids sharing bit 2 (e.g. BRANCH_MISSES = 5,
 * BUS_CYCLES = 6) with period 1 were wrongly classified as BTS.
 * Compare for equality instead.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}
1317
/*
 * Does sample->addr for this event type refer to a code/data address
 * worth symbolizing?  True for the software page-fault events and for
 * BTS branch records.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE) {
		switch (attr->config) {
		case PERF_COUNT_SW_PAGE_FAULTS:
		case PERF_COUNT_SW_PAGE_FAULTS_MIN:
		case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
			return true;
		default:
			break;
		}
	}

	return is_bts_event(attr);
}
1331
/*
 * Resolve sample->addr (the data address carried by the sample) into
 * @al: try the function maps first, then fall back to variable maps,
 * and look up the symbol when a map was found.
 */
void perf_event__preprocess_sample_addr(union perf_event *event,
					struct perf_sample *sample,
					struct thread *thread,
					struct addr_location *al)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}