tools/perf/util/header.c (karo-tx-linux.git, at "perf tools: Include sys/param.h where needed")
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/list.h>
12 #include <linux/kernel.h>
13 #include <linux/bitops.h>
14 #include <sys/utsname.h>
15
16 #include "evlist.h"
17 #include "evsel.h"
18 #include "header.h"
19 #include "../perf.h"
20 #include "trace-event.h"
21 #include "session.h"
22 #include "symbol.h"
23 #include "debug.h"
24 #include "cpumap.h"
25 #include "pmu.h"
26 #include "vdso.h"
27 #include "strbuf.h"
28 #include "build-id.h"
29 #include "data.h"
30 #include <api/fs/fs.h>
31 #include "asm/bug.h"
32
33 #include "sane_ctype.h"
34
35 /*
36  * magic2 = "PERFILE2"
37  * must be a numerical value to let the endianness
38  * determine the memory layout. That way we are able
39  * to detect endianness when reading the perf.data file
40  * back.
41  *
42  * We also check for the legacy (PERFFILE) format.
43  */
44 static const char *__perf_magic1 = "PERFFILE";
45 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
46 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
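/*
 * 0x32454c4946524550 is the byte sequence "PERFILE2" read as a
 * little-endian u64, and __perf_magic2_sw is the same value byte-swapped.
 * A reader that finds the swapped constant in a file's header knows the
 * file was written with the opposite byte order and that the rest of the
 * header must be byte-swapped (see the ph->needs_swap handling below).
 */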
47
48 #define PERF_MAGIC      __perf_magic2
49
50 const char perf_version_string[] = PERF_VERSION;
51
52 struct perf_file_attr {
53         struct perf_event_attr  attr;
54         struct perf_file_section        ids;
55 };
56
57 void perf_header__set_feat(struct perf_header *header, int feat)
58 {
59         set_bit(feat, header->adds_features);
60 }
61
62 void perf_header__clear_feat(struct perf_header *header, int feat)
63 {
64         clear_bit(feat, header->adds_features);
65 }
66
67 bool perf_header__has_feat(const struct perf_header *header, int feat)
68 {
69         return test_bit(feat, header->adds_features);
70 }
71
72 static int do_write(int fd, const void *buf, size_t size)
73 {
74         while (size) {
75                 int ret = write(fd, buf, size);
76
77                 if (ret < 0)
78                         return -errno;
79
80                 size -= ret;
81                 buf += ret;
82         }
83
84         return 0;
85 }
86
87 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
88 {
89         static const char zero_buf[NAME_ALIGN];
90         int err = do_write(fd, bf, count);
91
92         if (!err)
93                 err = do_write(fd, zero_buf, count_aligned - count);
94
95         return err;
96 }
97
98 #define string_size(str)                                                \
99         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
100
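/*
 * On-disk string layout used by do_write_string()/do_read_string() below:
 *
 *      u32     len;            PERF_ALIGN(strlen(str) + 1, NAME_ALIGN)
 *      char    buf[len];       NUL terminated, zero padded up to len
 *
 * string_size() above is the total on-disk footprint of one such string.
 */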
101 static int do_write_string(int fd, const char *str)
102 {
103         u32 len, olen;
104         int ret;
105
106         olen = strlen(str) + 1;
107         len = PERF_ALIGN(olen, NAME_ALIGN);
108
109         /* write len, incl. \0 */
110         ret = do_write(fd, &len, sizeof(len));
111         if (ret < 0)
112                 return ret;
113
114         return write_padded(fd, str, olen, len);
115 }
116
117 static char *do_read_string(int fd, struct perf_header *ph)
118 {
119         ssize_t sz, ret;
120         u32 len;
121         char *buf;
122
123         sz = readn(fd, &len, sizeof(len));
124         if (sz < (ssize_t)sizeof(len))
125                 return NULL;
126
127         if (ph->needs_swap)
128                 len = bswap_32(len);
129
130         buf = malloc(len);
131         if (!buf)
132                 return NULL;
133
134         ret = readn(fd, buf, len);
135         if (ret == (ssize_t)len) {
136                 /*
137                  * strings are padded by zeroes
138                  * thus the actual strlen of buf
139                  * may be less than len
140                  */
141                 return buf;
142         }
143
144         free(buf);
145         return NULL;
146 }
147
148 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
149                             struct perf_evlist *evlist)
150 {
151         return read_tracing_data(fd, &evlist->entries);
152 }
153
154
155 static int write_build_id(int fd, struct perf_header *h,
156                           struct perf_evlist *evlist __maybe_unused)
157 {
158         struct perf_session *session;
159         int err;
160
161         session = container_of(h, struct perf_session, header);
162
163         if (!perf_session__read_build_ids(session, true))
164                 return -1;
165
166         err = perf_session__write_buildid_table(session, fd);
167         if (err < 0) {
168                 pr_debug("failed to write buildid table\n");
169                 return err;
170         }
171         perf_session__cache_build_ids(session);
172
173         return 0;
174 }
175
176 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
177                           struct perf_evlist *evlist __maybe_unused)
178 {
179         struct utsname uts;
180         int ret;
181
182         ret = uname(&uts);
183         if (ret < 0)
184                 return -1;
185
186         return do_write_string(fd, uts.nodename);
187 }
188
189 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
190                            struct perf_evlist *evlist __maybe_unused)
191 {
192         struct utsname uts;
193         int ret;
194
195         ret = uname(&uts);
196         if (ret < 0)
197                 return -1;
198
199         return do_write_string(fd, uts.release);
200 }
201
202 static int write_arch(int fd, struct perf_header *h __maybe_unused,
203                       struct perf_evlist *evlist __maybe_unused)
204 {
205         struct utsname uts;
206         int ret;
207
208         ret = uname(&uts);
209         if (ret < 0)
210                 return -1;
211
212         return do_write_string(fd, uts.machine);
213 }
214
215 static int write_version(int fd, struct perf_header *h __maybe_unused,
216                          struct perf_evlist *evlist __maybe_unused)
217 {
218         return do_write_string(fd, perf_version_string);
219 }
220
221 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
222 {
223         FILE *file;
224         char *buf = NULL;
225         char *s, *p;
226         const char *search = cpuinfo_proc;
227         size_t len = 0;
228         int ret = -1;
229
230         if (!search)
231                 return -1;
232
233         file = fopen("/proc/cpuinfo", "r");
234         if (!file)
235                 return -1;
236
237         while (getline(&buf, &len, file) > 0) {
238                 ret = strncmp(buf, search, strlen(search));
239                 if (!ret)
240                         break;
241         }
242
243         if (ret) {
244                 ret = -1;
245                 goto done;
246         }
247
248         s = buf;
249
250         p = strchr(buf, ':');
251         if (p && *(p+1) == ' ' && *(p+2))
252                 s = p + 2;
253         p = strchr(s, '\n');
254         if (p)
255                 *p = '\0';
256
257         /* squash extra space characters (branding string) */
258         p = s;
259         while (*p) {
260                 if (isspace(*p)) {
261                         char *r = p + 1;
262                         char *q = r;
263                         *p = ' ';
264                         while (*q && isspace(*q))
265                                 q++;
266                         if (q != (p+1))
267                                 while ((*r++ = *q++));
268                 }
269                 p++;
270         }
271         ret = do_write_string(fd, s);
272 done:
273         free(buf);
274         fclose(file);
275         return ret;
276 }
277
278 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
279                        struct perf_evlist *evlist __maybe_unused)
280 {
281 #ifndef CPUINFO_PROC
282 #define CPUINFO_PROC {"model name", }
283 #endif
284         const char *cpuinfo_procs[] = CPUINFO_PROC;
285         unsigned int i;
286
287         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
288                 int ret;
289                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
290                 if (ret >= 0)
291                         return ret;
292         }
293         return -1;
294 }
295
296
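/*
 * File format:
 *
 * struct nr_cpus {
 *      u32     nr_cpus_available;      cpu__max_present_cpu()
 *      u32     nr_cpus_online;         sysconf(_SC_NPROCESSORS_ONLN)
 * };
 */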
297 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
298                         struct perf_evlist *evlist __maybe_unused)
299 {
300         long nr;
301         u32 nrc, nra;
302         int ret;
303
304         nrc = cpu__max_present_cpu();
305
306         nr = sysconf(_SC_NPROCESSORS_ONLN);
307         if (nr < 0)
308                 return -1;
309
310         nra = (u32)(nr & UINT_MAX);
311
312         ret = do_write(fd, &nrc, sizeof(nrc));
313         if (ret < 0)
314                 return ret;
315
316         return do_write(fd, &nra, sizeof(nra));
317 }
318
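/*
 * File format:
 *
 * struct event_desc {
 *      u32     nr_events;
 *      u32     attr_size;
 *      struct {
 *              struct perf_event_attr  attr;
 *              u32     nr_ids;
 *              char    name[];         do_write_string() format
 *              u64     ids[nr_ids];
 *      } events[nr_events];
 * };
 */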
319 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
320                             struct perf_evlist *evlist)
321 {
322         struct perf_evsel *evsel;
323         u32 nre, nri, sz;
324         int ret;
325
326         nre = evlist->nr_entries;
327
328         /*
329          * write number of events
330          */
331         ret = do_write(fd, &nre, sizeof(nre));
332         if (ret < 0)
333                 return ret;
334
335         /*
336          * size of perf_event_attr struct
337          */
338         sz = (u32)sizeof(evsel->attr);
339         ret = do_write(fd, &sz, sizeof(sz));
340         if (ret < 0)
341                 return ret;
342
343         evlist__for_each_entry(evlist, evsel) {
344                 ret = do_write(fd, &evsel->attr, sz);
345                 if (ret < 0)
346                         return ret;
347                 /*
348                  * write the number of unique ids for this event
349                  * (there is one id per instance of an event);
350                  *
351                  * copy into nri to stay independent of the
352                  * type of evsel->ids
353                  */
354                 nri = evsel->ids;
355                 ret = do_write(fd, &nri, sizeof(nri));
356                 if (ret < 0)
357                         return ret;
358
359                 /*
360                  * write event string as passed on cmdline
361                  */
362                 ret = do_write_string(fd, perf_evsel__name(evsel));
363                 if (ret < 0)
364                         return ret;
365                 /*
366                  * write unique ids for this event
367                  */
368                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
369                 if (ret < 0)
370                         return ret;
371         }
372         return 0;
373 }
374
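/*
 * File format:
 *
 * struct cmdline {
 *      u32     nr;             perf_env.nr_cmdline + 1
 *      char    arg[][];        nr strings; the first one is the resolved
 *                              path of the running perf binary
 * };
 */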
375 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
376                          struct perf_evlist *evlist __maybe_unused)
377 {
378         char buf[MAXPATHLEN];
379         u32 n;
380         int i, ret;
381
382         /* actual path to perf binary */
383         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
384         if (ret <= 0)
385                 return -1;
386
387         /* readlink() does not add null termination */
388         buf[ret] = '\0';
389
390         /* account for binary path */
391         n = perf_env.nr_cmdline + 1;
392
393         ret = do_write(fd, &n, sizeof(n));
394         if (ret < 0)
395                 return ret;
396
397         ret = do_write_string(fd, buf);
398         if (ret < 0)
399                 return ret;
400
401         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
402                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
403                 if (ret < 0)
404                         return ret;
405         }
406         return 0;
407 }
408
409 #define CORE_SIB_FMT \
410         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
411 #define THRD_SIB_FMT \
412         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
413
414 struct cpu_topo {
415         u32 cpu_nr;
416         u32 core_sib;
417         u32 thread_sib;
418         char **core_siblings;
419         char **thread_siblings;
420 };
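/*
 * core_siblings[]/thread_siblings[] collect the distinct sibling-list
 * strings read from the sysfs files above; build_cpu_topo() appends a
 * CPU's list only when no earlier CPU reported an identical one, so
 * core_sib and thread_sib end up counting unique packages and cores
 * rather than CPUs.
 */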
421
422 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
423 {
424         FILE *fp;
425         char filename[MAXPATHLEN];
426         char *buf = NULL, *p;
427         size_t len = 0;
428         ssize_t sret;
429         u32 i = 0;
430         int ret = -1;
431
432         sprintf(filename, CORE_SIB_FMT, cpu);
433         fp = fopen(filename, "r");
434         if (!fp)
435                 goto try_threads;
436
437         sret = getline(&buf, &len, fp);
438         fclose(fp);
439         if (sret <= 0)
440                 goto try_threads;
441
442         p = strchr(buf, '\n');
443         if (p)
444                 *p = '\0';
445
446         for (i = 0; i < tp->core_sib; i++) {
447                 if (!strcmp(buf, tp->core_siblings[i]))
448                         break;
449         }
450         if (i == tp->core_sib) {
451                 tp->core_siblings[i] = buf;
452                 tp->core_sib++;
453                 buf = NULL;
454                 len = 0;
455         }
456         ret = 0;
457
458 try_threads:
459         sprintf(filename, THRD_SIB_FMT, cpu);
460         fp = fopen(filename, "r");
461         if (!fp)
462                 goto done;
463
464         if (getline(&buf, &len, fp) <= 0)
465                 goto done;
466
467         p = strchr(buf, '\n');
468         if (p)
469                 *p = '\0';
470
471         for (i = 0; i < tp->thread_sib; i++) {
472                 if (!strcmp(buf, tp->thread_siblings[i]))
473                         break;
474         }
475         if (i == tp->thread_sib) {
476                 tp->thread_siblings[i] = buf;
477                 tp->thread_sib++;
478                 buf = NULL;
479         }
480         ret = 0;
481 done:
482         if (fp)
483                 fclose(fp);
484         free(buf);
485         return ret;
486 }
487
488 static void free_cpu_topo(struct cpu_topo *tp)
489 {
490         u32 i;
491
492         if (!tp)
493                 return;
494
495         for (i = 0 ; i < tp->core_sib; i++)
496                 zfree(&tp->core_siblings[i]);
497
498         for (i = 0 ; i < tp->thread_sib; i++)
499                 zfree(&tp->thread_siblings[i]);
500
501         free(tp);
502 }
503
504 static struct cpu_topo *build_cpu_topology(void)
505 {
506         struct cpu_topo *tp = NULL;
507         void *addr;
508         u32 nr, i;
509         size_t sz;
510         long ncpus;
511         int ret = -1;
512         struct cpu_map *map;
513
514         ncpus = cpu__max_present_cpu();
515
516         /* build online CPU map */
517         map = cpu_map__new(NULL);
518         if (map == NULL) {
519                 pr_debug("failed to get system cpumap\n");
520                 return NULL;
521         }
522
523         nr = (u32)(ncpus & UINT_MAX);
524
525         sz = nr * sizeof(char *);
526         addr = calloc(1, sizeof(*tp) + 2 * sz);
527         if (!addr)
528                 goto out_free;
529
530         tp = addr;
531         tp->cpu_nr = nr;
532         addr += sizeof(*tp);
533         tp->core_siblings = addr;
534         addr += sz;
535         tp->thread_siblings = addr;
536
537         for (i = 0; i < nr; i++) {
538                 if (!cpu_map__has(map, i))
539                         continue;
540
541                 ret = build_cpu_topo(tp, i);
542                 if (ret < 0)
543                         break;
544         }
545
546 out_free:
547         cpu_map__put(map);
548         if (ret) {
549                 free_cpu_topo(tp);
550                 tp = NULL;
551         }
552         return tp;
553 }
554
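/*
 * File format:
 *
 * struct cpu_topology {
 *      u32     core_sib;
 *      char    core_siblings[core_sib][];
 *      u32     thread_sib;
 *      char    thread_siblings[thread_sib][];
 *      struct {
 *              u32     core_id;
 *              u32     socket_id;
 *      } cpu[nr_cpus_avail];           appended by newer perf only
 * };
 */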
555 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
556                           struct perf_evlist *evlist __maybe_unused)
557 {
558         struct cpu_topo *tp;
559         u32 i;
560         int ret, j;
561
562         tp = build_cpu_topology();
563         if (!tp)
564                 return -1;
565
566         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
567         if (ret < 0)
568                 goto done;
569
570         for (i = 0; i < tp->core_sib; i++) {
571                 ret = do_write_string(fd, tp->core_siblings[i]);
572                 if (ret < 0)
573                         goto done;
574         }
575         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
576         if (ret < 0)
577                 goto done;
578
579         for (i = 0; i < tp->thread_sib; i++) {
580                 ret = do_write_string(fd, tp->thread_siblings[i]);
581                 if (ret < 0)
582                         break;
583         }
584
585         ret = perf_env__read_cpu_topology_map(&perf_env);
586         if (ret < 0)
587                 goto done;
588
589         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
590                 ret = do_write(fd, &perf_env.cpu[j].core_id,
591                                sizeof(perf_env.cpu[j].core_id));
592                 if (ret < 0)
593                         return ret;
594                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
595                                sizeof(perf_env.cpu[j].socket_id));
596                 if (ret < 0)
597                         return ret;
598         }
599 done:
600         free_cpu_topo(tp);
601         return ret;
602 }
603
604
605
606 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
607                           struct perf_evlist *evlist __maybe_unused)
608 {
609         char *buf = NULL;
610         FILE *fp;
611         size_t len = 0;
612         int ret = -1, n;
613         uint64_t mem;
614
615         fp = fopen("/proc/meminfo", "r");
616         if (!fp)
617                 return -1;
618
619         while (getline(&buf, &len, fp) > 0) {
620                 ret = strncmp(buf, "MemTotal:", 9);
621                 if (!ret)
622                         break;
623         }
624         if (!ret) {
625                 n = sscanf(buf, "%*s %"PRIu64, &mem);
626                 if (n == 1)
627                         ret = do_write(fd, &mem, sizeof(mem));
628         } else
629                 ret = -1;
630         free(buf);
631         fclose(fp);
632         return ret;
633 }
634
635 static int write_topo_node(int fd, int node)
636 {
637         char str[MAXPATHLEN];
638         char field[32];
639         char *buf = NULL, *p;
640         size_t len = 0;
641         FILE *fp;
642         u64 mem_total, mem_free, mem;
643         int ret = -1;
644
645         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
646         fp = fopen(str, "r");
647         if (!fp)
648                 return -1;
649
650         while (getline(&buf, &len, fp) > 0) {
651                 /* skip over invalid lines */
652                 if (!strchr(buf, ':'))
653                         continue;
654                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
655                         goto done;
656                 if (!strcmp(field, "MemTotal:"))
657                         mem_total = mem;
658                 if (!strcmp(field, "MemFree:"))
659                         mem_free = mem;
660         }
661
662         fclose(fp);
663         fp = NULL;
664
665         ret = do_write(fd, &mem_total, sizeof(u64));
666         if (ret)
667                 goto done;
668
669         ret = do_write(fd, &mem_free, sizeof(u64));
670         if (ret)
671                 goto done;
672
673         ret = -1;
674         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
675
676         fp = fopen(str, "r");
677         if (!fp)
678                 goto done;
679
680         if (getline(&buf, &len, fp) <= 0)
681                 goto done;
682
683         p = strchr(buf, '\n');
684         if (p)
685                 *p = '\0';
686
687         ret = do_write_string(fd, buf);
688 done:
689         free(buf);
690         if (fp)
691                 fclose(fp);
692         return ret;
693 }
694
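/*
 * File format:
 *
 * struct numa_topology {
 *      u32     nr_nodes;
 *      struct {
 *              u32     node;
 *              u64     mem_total;      in kB
 *              u64     mem_free;       in kB
 *              char    cpu_list[];     e.g. "0-3,8-11"
 *      } nodes[nr_nodes];
 * };
 */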
695 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
696                           struct perf_evlist *evlist __maybe_unused)
697 {
698         char *buf = NULL;
699         size_t len = 0;
700         FILE *fp;
701         struct cpu_map *node_map = NULL;
702         char *c;
703         u32 nr, i, j;
704         int ret = -1;
705
706         fp = fopen("/sys/devices/system/node/online", "r");
707         if (!fp)
708                 return -1;
709
710         if (getline(&buf, &len, fp) <= 0)
711                 goto done;
712
713         c = strchr(buf, '\n');
714         if (c)
715                 *c = '\0';
716
717         node_map = cpu_map__new(buf);
718         if (!node_map)
719                 goto done;
720
721         nr = (u32)node_map->nr;
722
723         ret = do_write(fd, &nr, sizeof(nr));
724         if (ret < 0)
725                 goto done;
726
727         for (i = 0; i < nr; i++) {
728                 j = (u32)node_map->map[i];
729                 ret = do_write(fd, &j, sizeof(j));
730                 if (ret < 0)
731                         break;
732
733                 ret = write_topo_node(fd, i);
734                 if (ret < 0)
735                         break;
736         }
737 done:
738         free(buf);
739         fclose(fp);
740         cpu_map__put(node_map);
741         return ret;
742 }
743
744 /*
745  * File format:
746  *
747  * struct pmu_mappings {
748  *      u32     pmu_num;
749  *      struct pmu_map {
750  *              u32     type;
751  *              char    name[];
752  *      }[pmu_num];
753  * };
754  */
755
756 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
757                               struct perf_evlist *evlist __maybe_unused)
758 {
759         struct perf_pmu *pmu = NULL;
760         off_t offset = lseek(fd, 0, SEEK_CUR);
761         __u32 pmu_num = 0;
762         int ret;
763
764         /* write real pmu_num later */
765         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
766         if (ret < 0)
767                 return ret;
768
769         while ((pmu = perf_pmu__scan(pmu))) {
770                 if (!pmu->name)
771                         continue;
772                 pmu_num++;
773
774                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
775                 if (ret < 0)
776                         return ret;
777
778                 ret = do_write_string(fd, pmu->name);
779                 if (ret < 0)
780                         return ret;
781         }
782
783         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
784                 /* discard all */
785                 lseek(fd, offset, SEEK_SET);
786                 return -1;
787         }
788
789         return 0;
790 }
791
792 /*
793  * File format:
794  *
795  * struct group_descs {
796  *      u32     nr_groups;
797  *      struct group_desc {
798  *              char    name[];
799  *              u32     leader_idx;
800  *              u32     nr_members;
801  *      }[nr_groups];
802  * };
803  */
804 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
805                             struct perf_evlist *evlist)
806 {
807         u32 nr_groups = evlist->nr_groups;
808         struct perf_evsel *evsel;
809         int ret;
810
811         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
812         if (ret < 0)
813                 return ret;
814
815         evlist__for_each_entry(evlist, evsel) {
816                 if (perf_evsel__is_group_leader(evsel) &&
817                     evsel->nr_members > 1) {
818                         const char *name = evsel->group_name ?: "{anon_group}";
819                         u32 leader_idx = evsel->idx;
820                         u32 nr_members = evsel->nr_members;
821
822                         ret = do_write_string(fd, name);
823                         if (ret < 0)
824                                 return ret;
825
826                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
827                         if (ret < 0)
828                                 return ret;
829
830                         ret = do_write(fd, &nr_members, sizeof(nr_members));
831                         if (ret < 0)
832                                 return ret;
833                 }
834         }
835         return 0;
836 }
837
838 /*
839  * default get_cpuid(): nothing gets recorded
840  * actual implementation must be in arch/$(ARCH)/util/header.c
841  */
842 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
843 {
844         return -1;
845 }
846
847 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
848                        struct perf_evlist *evlist __maybe_unused)
849 {
850         char buffer[64];
851         int ret;
852
853         ret = get_cpuid(buffer, sizeof(buffer));
854         if (!ret)
855                 goto write_it;
856
857         return -1;
858 write_it:
859         return do_write_string(fd, buffer);
860 }
861
862 static int write_branch_stack(int fd __maybe_unused,
863                               struct perf_header *h __maybe_unused,
864                        struct perf_evlist *evlist __maybe_unused)
865 {
866         return 0;
867 }
868
869 static int write_auxtrace(int fd, struct perf_header *h,
870                           struct perf_evlist *evlist __maybe_unused)
871 {
872         struct perf_session *session;
873         int err;
874
875         session = container_of(h, struct perf_session, header);
876
877         err = auxtrace_index__write(fd, &session->auxtrace_index);
878         if (err < 0)
879                 pr_err("Failed to write auxtrace index\n");
880         return err;
881 }
882
883 static int cpu_cache_level__sort(const void *a, const void *b)
884 {
885         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
886         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
887
888         return cache_a->level - cache_b->level;
889 }
890
891 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
892 {
893         if (a->level != b->level)
894                 return false;
895
896         if (a->line_size != b->line_size)
897                 return false;
898
899         if (a->sets != b->sets)
900                 return false;
901
902         if (a->ways != b->ways)
903                 return false;
904
905         if (strcmp(a->type, b->type))
906                 return false;
907
908         if (strcmp(a->size, b->size))
909                 return false;
910
911         if (strcmp(a->map, b->map))
912                 return false;
913
914         return true;
915 }
916
917 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
918 {
919         char path[PATH_MAX], file[PATH_MAX];
920         struct stat st;
921         size_t len;
922
923         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
924         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
925
926         if (stat(file, &st))
927                 return 1;
928
929         scnprintf(file, PATH_MAX, "%s/level", path);
930         if (sysfs__read_int(file, (int *) &cache->level))
931                 return -1;
932
933         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
934         if (sysfs__read_int(file, (int *) &cache->line_size))
935                 return -1;
936
937         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
938         if (sysfs__read_int(file, (int *) &cache->sets))
939                 return -1;
940
941         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
942         if (sysfs__read_int(file, (int *) &cache->ways))
943                 return -1;
944
945         scnprintf(file, PATH_MAX, "%s/type", path);
946         if (sysfs__read_str(file, &cache->type, &len))
947                 return -1;
948
949         cache->type[len] = 0;
950         cache->type = rtrim(cache->type);
951
952         scnprintf(file, PATH_MAX, "%s/size", path);
953         if (sysfs__read_str(file, &cache->size, &len)) {
954                 free(cache->type);
955                 return -1;
956         }
957
958         cache->size[len] = 0;
959         cache->size = rtrim(cache->size);
960
961         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
962         if (sysfs__read_str(file, &cache->map, &len)) {
963                 free(cache->map);
964                 free(cache->type);
965                 return -1;
966         }
967
968         cache->map[len] = 0;
969         cache->map = rtrim(cache->map);
970         return 0;
971 }
972
973 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
974 {
975         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
976 }
977
978 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
979 {
980         u32 i, cnt = 0;
981         long ncpus;
982         u32 nr, cpu;
983         u16 level;
984
985         ncpus = sysconf(_SC_NPROCESSORS_CONF);
986         if (ncpus < 0)
987                 return -1;
988
989         nr = (u32)(ncpus & UINT_MAX);
990
991         for (cpu = 0; cpu < nr; cpu++) {
992                 for (level = 0; level < 10; level++) {
993                         struct cpu_cache_level c;
994                         int err;
995
996                         err = cpu_cache_level__read(&c, cpu, level);
997                         if (err < 0)
998                                 return err;
999
1000                         if (err == 1)
1001                                 break;
1002
1003                         for (i = 0; i < cnt; i++) {
1004                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1005                                         break;
1006                         }
1007
1008                         if (i == cnt)
1009                                 caches[cnt++] = c;
1010                         else
1011                                 cpu_cache_level__free(&c);
1012
1013                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1014                                 goto out;
1015                 }
1016         }
1017  out:
1018         *cntp = cnt;
1019         return 0;
1020 }
1021
1022 #define MAX_CACHES 2000
1023
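/*
 * File format:
 *
 * struct caches {
 *      u32     version;        currently 1
 *      u32     nr_caches;
 *      struct {
 *              u32     level;
 *              u32     line_size;
 *              u32     sets;
 *              u32     ways;
 *              char    type[];
 *              char    size[];
 *              char    map[];
 *      } caches[nr_caches];
 * };
 */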
1024 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1025                           struct perf_evlist *evlist __maybe_unused)
1026 {
1027         struct cpu_cache_level caches[MAX_CACHES];
1028         u32 cnt = 0, i, version = 1;
1029         int ret;
1030
1031         ret = build_caches(caches, MAX_CACHES, &cnt);
1032         if (ret)
1033                 goto out;
1034
1035         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1036
1037         ret = do_write(fd, &version, sizeof(u32));
1038         if (ret < 0)
1039                 goto out;
1040
1041         ret = do_write(fd, &cnt, sizeof(u32));
1042         if (ret < 0)
1043                 goto out;
1044
1045         for (i = 0; i < cnt; i++) {
1046                 struct cpu_cache_level *c = &caches[i];
1047
1048                 #define _W(v)                                   \
1049                         ret = do_write(fd, &c->v, sizeof(u32)); \
1050                         if (ret < 0)                            \
1051                                 goto out;
1052
1053                 _W(level)
1054                 _W(line_size)
1055                 _W(sets)
1056                 _W(ways)
1057                 #undef _W
1058
1059                 #define _W(v)                                           \
1060                         ret = do_write_string(fd, (const char *) c->v); \
1061                         if (ret < 0)                                    \
1062                                 goto out;
1063
1064                 _W(type)
1065                 _W(size)
1066                 _W(map)
1067                 #undef _W
1068         }
1069
1070 out:
1071         for (i = 0; i < cnt; i++)
1072                 cpu_cache_level__free(&caches[i]);
1073         return ret;
1074 }
1075
1076 static int write_stat(int fd __maybe_unused,
1077                       struct perf_header *h __maybe_unused,
1078                       struct perf_evlist *evlist __maybe_unused)
1079 {
1080         return 0;
1081 }
1082
1083 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1084                            FILE *fp)
1085 {
1086         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1087 }
1088
1089 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1090                             FILE *fp)
1091 {
1092         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1093 }
1094
1095 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1096 {
1097         fprintf(fp, "# arch : %s\n", ph->env.arch);
1098 }
1099
1100 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1101                           FILE *fp)
1102 {
1103         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1104 }
1105
1106 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1107                          FILE *fp)
1108 {
1109         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1110         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1111 }
1112
1113 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1114                           FILE *fp)
1115 {
1116         fprintf(fp, "# perf version : %s\n", ph->env.version);
1117 }
1118
1119 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1120                           FILE *fp)
1121 {
1122         int nr, i;
1123
1124         nr = ph->env.nr_cmdline;
1125
1126         fprintf(fp, "# cmdline : ");
1127
1128         for (i = 0; i < nr; i++)
1129                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1130         fputc('\n', fp);
1131 }
1132
1133 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1134                                FILE *fp)
1135 {
1136         int nr, i;
1137         char *str;
1138         int cpu_nr = ph->env.nr_cpus_avail;
1139
1140         nr = ph->env.nr_sibling_cores;
1141         str = ph->env.sibling_cores;
1142
1143         for (i = 0; i < nr; i++) {
1144                 fprintf(fp, "# sibling cores   : %s\n", str);
1145                 str += strlen(str) + 1;
1146         }
1147
1148         nr = ph->env.nr_sibling_threads;
1149         str = ph->env.sibling_threads;
1150
1151         for (i = 0; i < nr; i++) {
1152                 fprintf(fp, "# sibling threads : %s\n", str);
1153                 str += strlen(str) + 1;
1154         }
1155
1156         if (ph->env.cpu != NULL) {
1157                 for (i = 0; i < cpu_nr; i++)
1158                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1159                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1160         } else
1161                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1162 }
1163
1164 static void free_event_desc(struct perf_evsel *events)
1165 {
1166         struct perf_evsel *evsel;
1167
1168         if (!events)
1169                 return;
1170
1171         for (evsel = events; evsel->attr.size; evsel++) {
1172                 zfree(&evsel->name);
1173                 zfree(&evsel->id);
1174         }
1175
1176         free(events);
1177 }
1178
1179 static struct perf_evsel *
1180 read_event_desc(struct perf_header *ph, int fd)
1181 {
1182         struct perf_evsel *evsel, *events = NULL;
1183         u64 *id;
1184         void *buf = NULL;
1185         u32 nre, sz, nr, i, j;
1186         ssize_t ret;
1187         size_t msz;
1188
1189         /* number of events */
1190         ret = readn(fd, &nre, sizeof(nre));
1191         if (ret != (ssize_t)sizeof(nre))
1192                 goto error;
1193
1194         if (ph->needs_swap)
1195                 nre = bswap_32(nre);
1196
1197         ret = readn(fd, &sz, sizeof(sz));
1198         if (ret != (ssize_t)sizeof(sz))
1199                 goto error;
1200
1201         if (ph->needs_swap)
1202                 sz = bswap_32(sz);
1203
1204         /* buffer to hold on file attr struct */
1205         buf = malloc(sz);
1206         if (!buf)
1207                 goto error;
1208
1209         /* the last event terminates with evsel->attr.size == 0: */
1210         events = calloc(nre + 1, sizeof(*events));
1211         if (!events)
1212                 goto error;
1213
1214         msz = sizeof(evsel->attr);
1215         if (sz < msz)
1216                 msz = sz;
1217
1218         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1219                 evsel->idx = i;
1220
1221                 /*
1222                  * must read entire on-file attr struct to
1223                  * sync up with layout.
1224                  */
1225                 ret = readn(fd, buf, sz);
1226                 if (ret != (ssize_t)sz)
1227                         goto error;
1228
1229                 if (ph->needs_swap)
1230                         perf_event__attr_swap(buf);
1231
1232                 memcpy(&evsel->attr, buf, msz);
1233
1234                 ret = readn(fd, &nr, sizeof(nr));
1235                 if (ret != (ssize_t)sizeof(nr))
1236                         goto error;
1237
1238                 if (ph->needs_swap) {
1239                         nr = bswap_32(nr);
1240                         evsel->needs_swap = true;
1241                 }
1242
1243                 evsel->name = do_read_string(fd, ph);
1244
1245                 if (!nr)
1246                         continue;
1247
1248                 id = calloc(nr, sizeof(*id));
1249                 if (!id)
1250                         goto error;
1251                 evsel->ids = nr;
1252                 evsel->id = id;
1253
1254                 for (j = 0 ; j < nr; j++) {
1255                         ret = readn(fd, id, sizeof(*id));
1256                         if (ret != (ssize_t)sizeof(*id))
1257                                 goto error;
1258                         if (ph->needs_swap)
1259                                 *id = bswap_64(*id);
1260                         id++;
1261                 }
1262         }
1263 out:
1264         free(buf);
1265         return events;
1266 error:
1267         free_event_desc(events);
1268         events = NULL;
1269         goto out;
1270 }
1271
1272 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1273                                 void *priv __attribute__((unused)))
1274 {
1275         return fprintf(fp, ", %s = %s", name, val);
1276 }
1277
1278 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1279 {
1280         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1281         u32 j;
1282         u64 *id;
1283
1284         if (!events) {
1285                 fprintf(fp, "# event desc: not available or unable to read\n");
1286                 return;
1287         }
1288
1289         for (evsel = events; evsel->attr.size; evsel++) {
1290                 fprintf(fp, "# event : name = %s", evsel->name);
1291
1292                 if (evsel->ids) {
1293                         fprintf(fp, ", id = {");
1294                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1295                                 if (j)
1296                                         fputc(',', fp);
1297                                 fprintf(fp, " %"PRIu64, *id);
1298                         }
1299                         fprintf(fp, " }");
1300                 }
1301
1302                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1303
1304                 fputc('\n', fp);
1305         }
1306
1307         free_event_desc(events);
1308 }
1309
1310 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1311                             FILE *fp)
1312 {
1313         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1314 }
1315
1316 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1317                                 FILE *fp)
1318 {
1319         int i;
1320         struct numa_node *n;
1321
1322         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1323                 n = &ph->env.numa_nodes[i];
1324
1325                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1326                             " free = %"PRIu64" kB\n",
1327                         n->node, n->mem_total, n->mem_free);
1328
1329                 fprintf(fp, "# node%u cpu list : ", n->node);
1330                 cpu_map__fprintf(n->map, fp);
1331         }
1332 }
1333
1334 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1335 {
1336         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1337 }
1338
1339 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1340                                int fd __maybe_unused, FILE *fp)
1341 {
1342         fprintf(fp, "# contains samples with branch stack\n");
1343 }
1344
1345 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1346                            int fd __maybe_unused, FILE *fp)
1347 {
1348         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1349 }
1350
1351 static void print_stat(struct perf_header *ph __maybe_unused,
1352                        int fd __maybe_unused, FILE *fp)
1353 {
1354         fprintf(fp, "# contains stat data\n");
1355 }
1356
1357 static void print_cache(struct perf_header *ph, int fd __maybe_unused,
1358                         FILE *fp)
1359 {
1360         int i;
1361
1362         fprintf(fp, "# CPU cache info:\n");
1363         for (i = 0; i < ph->env.caches_cnt; i++) {
1364                 fprintf(fp, "#  ");
1365                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1366         }
1367 }
1368
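/*
 * ph->env.pmu_mappings is a sequence of nr_pmu_mappings NUL-terminated
 * "<type>:<name>" entries laid out back to back, presumably assembled
 * when the PMU_MAPPINGS section is read in.
 */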
1369 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1370                                FILE *fp)
1371 {
1372         const char *delimiter = "# pmu mappings: ";
1373         char *str, *tmp;
1374         u32 pmu_num;
1375         u32 type;
1376
1377         pmu_num = ph->env.nr_pmu_mappings;
1378         if (!pmu_num) {
1379                 fprintf(fp, "# pmu mappings: not available\n");
1380                 return;
1381         }
1382
1383         str = ph->env.pmu_mappings;
1384
1385         while (pmu_num) {
1386                 type = strtoul(str, &tmp, 0);
1387                 if (*tmp != ':')
1388                         goto error;
1389
1390                 str = tmp + 1;
1391                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1392
1393                 delimiter = ", ";
1394                 str += strlen(str) + 1;
1395                 pmu_num--;
1396         }
1397
1398         fprintf(fp, "\n");
1399
1400         if (!pmu_num)
1401                 return;
1402 error:
1403         fprintf(fp, "# pmu mappings: unable to read\n");
1404 }
1405
1406 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1407                              FILE *fp)
1408 {
1409         struct perf_session *session;
1410         struct perf_evsel *evsel;
1411         u32 nr = 0;
1412
1413         session = container_of(ph, struct perf_session, header);
1414
1415         evlist__for_each_entry(session->evlist, evsel) {
1416                 if (perf_evsel__is_group_leader(evsel) &&
1417                     evsel->nr_members > 1) {
1418                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1419                                 perf_evsel__name(evsel));
1420
1421                         nr = evsel->nr_members - 1;
1422                 } else if (nr) {
1423                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1424
1425                         if (--nr == 0)
1426                                 fprintf(fp, "}\n");
1427                 }
1428         }
1429 }
1430
1431 static int __event_process_build_id(struct build_id_event *bev,
1432                                     char *filename,
1433                                     struct perf_session *session)
1434 {
1435         int err = -1;
1436         struct machine *machine;
1437         u16 cpumode;
1438         struct dso *dso;
1439         enum dso_kernel_type dso_type;
1440
1441         machine = perf_session__findnew_machine(session, bev->pid);
1442         if (!machine)
1443                 goto out;
1444
1445         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1446
1447         switch (cpumode) {
1448         case PERF_RECORD_MISC_KERNEL:
1449                 dso_type = DSO_TYPE_KERNEL;
1450                 break;
1451         case PERF_RECORD_MISC_GUEST_KERNEL:
1452                 dso_type = DSO_TYPE_GUEST_KERNEL;
1453                 break;
1454         case PERF_RECORD_MISC_USER:
1455         case PERF_RECORD_MISC_GUEST_USER:
1456                 dso_type = DSO_TYPE_USER;
1457                 break;
1458         default:
1459                 goto out;
1460         }
1461
1462         dso = machine__findnew_dso(machine, filename);
1463         if (dso != NULL) {
1464                 char sbuild_id[SBUILD_ID_SIZE];
1465
1466                 dso__set_build_id(dso, &bev->build_id);
1467
1468                 if (!is_kernel_module(filename, cpumode))
1469                         dso->kernel = dso_type;
1470
1471                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1472                                   sbuild_id);
1473                 pr_debug("build id event received for %s: %s\n",
1474                          dso->long_name, sbuild_id);
1475                 dso__put(dso);
1476         }
1477
1478         err = 0;
1479 out:
1480         return err;
1481 }
1482
1483 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1484                                                  int input, u64 offset, u64 size)
1485 {
1486         struct perf_session *session = container_of(header, struct perf_session, header);
1487         struct {
1488                 struct perf_event_header   header;
1489                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1490                 char                       filename[0];
1491         } old_bev;
1492         struct build_id_event bev;
1493         char filename[PATH_MAX];
1494         u64 limit = offset + size;
1495
1496         while (offset < limit) {
1497                 ssize_t len;
1498
1499                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1500                         return -1;
1501
1502                 if (header->needs_swap)
1503                         perf_event_header__bswap(&old_bev.header);
1504
1505                 len = old_bev.header.size - sizeof(old_bev);
1506                 if (readn(input, filename, len) != len)
1507                         return -1;
1508
1509                 bev.header = old_bev.header;
1510
1511                 /*
1512                  * As the pid is the missing value, we need to fill
1513                  * it properly. The header.misc value gives us a nice hint.
1514                  */
1515                 bev.pid = HOST_KERNEL_ID;
1516                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1517                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1518                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1519
1520                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1521                 __event_process_build_id(&bev, filename, session);
1522
1523                 offset += bev.header.size;
1524         }
1525
1526         return 0;
1527 }
1528
1529 static int perf_header__read_build_ids(struct perf_header *header,
1530                                        int input, u64 offset, u64 size)
1531 {
1532         struct perf_session *session = container_of(header, struct perf_session, header);
1533         struct build_id_event bev;
1534         char filename[PATH_MAX];
1535         u64 limit = offset + size, orig_offset = offset;
1536         int err = -1;
1537
1538         while (offset < limit) {
1539                 ssize_t len;
1540
1541                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1542                         goto out;
1543
1544                 if (header->needs_swap)
1545                         perf_event_header__bswap(&bev.header);
1546
1547                 len = bev.header.size - sizeof(bev);
1548                 if (readn(input, filename, len) != len)
1549                         goto out;
1550                 /*
1551                  * The a1645ce1 changeset:
1552                  *
1553                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1554                  *
1555                  * Added a field to struct build_id_event that broke the file
1556                  * format.
1557                  *
1558                  * Since the kernel build-id is the first entry, process the
1559                  * table using the old format if the well known
1560                  * '[kernel.kallsyms]' string for the kernel build-id has the
1561                  * first 4 characters chopped off (where the pid_t sits).
1562                  */
1563                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1564                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1565                                 return -1;
1566                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1567                 }
1568
1569                 __event_process_build_id(&bev, filename, session);
1570
1571                 offset += bev.header.size;
1572         }
1573         err = 0;
1574 out:
1575         return err;
1576 }
1577
1578 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1579                                 struct perf_header *ph __maybe_unused,
1580                                 int fd, void *data)
1581 {
1582         ssize_t ret = trace_report(fd, data, false);
1583         return ret < 0 ? -1 : 0;
1584 }
1585
1586 static int process_build_id(struct perf_file_section *section,
1587                             struct perf_header *ph, int fd,
1588                             void *data __maybe_unused)
1589 {
1590         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1591                 pr_debug("Failed to read buildids, continuing...\n");
1592         return 0;
1593 }
1594
1595 static int process_hostname(struct perf_file_section *section __maybe_unused,
1596                             struct perf_header *ph, int fd,
1597                             void *data __maybe_unused)
1598 {
1599         ph->env.hostname = do_read_string(fd, ph);
1600         return ph->env.hostname ? 0 : -ENOMEM;
1601 }
1602
1603 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1604                              struct perf_header *ph, int fd,
1605                              void *data __maybe_unused)
1606 {
1607         ph->env.os_release = do_read_string(fd, ph);
1608         return ph->env.os_release ? 0 : -ENOMEM;
1609 }
1610
1611 static int process_version(struct perf_file_section *section __maybe_unused,
1612                            struct perf_header *ph, int fd,
1613                            void *data __maybe_unused)
1614 {
1615         ph->env.version = do_read_string(fd, ph);
1616         return ph->env.version ? 0 : -ENOMEM;
1617 }
1618
1619 static int process_arch(struct perf_file_section *section __maybe_unused,
1620                         struct perf_header *ph, int fd,
1621                         void *data __maybe_unused)
1622 {
1623         ph->env.arch = do_read_string(fd, ph);
1624         return ph->env.arch ? 0 : -ENOMEM;
1625 }
1626
1627 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1628                           struct perf_header *ph, int fd,
1629                           void *data __maybe_unused)
1630 {
1631         ssize_t ret;
1632         u32 nr;
1633
1634         ret = readn(fd, &nr, sizeof(nr));
1635         if (ret != sizeof(nr))
1636                 return -1;
1637
1638         if (ph->needs_swap)
1639                 nr = bswap_32(nr);
1640
1641         ph->env.nr_cpus_avail = nr;
1642
1643         ret = readn(fd, &nr, sizeof(nr));
1644         if (ret != sizeof(nr))
1645                 return -1;
1646
1647         if (ph->needs_swap)
1648                 nr = bswap_32(nr);
1649
1650         ph->env.nr_cpus_online = nr;
1651         return 0;
1652 }
1653
1654 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1655                            struct perf_header *ph, int fd,
1656                            void *data __maybe_unused)
1657 {
1658         ph->env.cpu_desc = do_read_string(fd, ph);
1659         return ph->env.cpu_desc ? 0 : -ENOMEM;
1660 }
1661
1662 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1663                          struct perf_header *ph,  int fd,
1664                          void *data __maybe_unused)
1665 {
1666         ph->env.cpuid = do_read_string(fd, ph);
1667         return ph->env.cpuid ? 0 : -ENOMEM;
1668 }
1669
1670 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1671                              struct perf_header *ph, int fd,
1672                              void *data __maybe_unused)
1673 {
1674         uint64_t mem;
1675         ssize_t ret;
1676
1677         ret = readn(fd, &mem, sizeof(mem));
1678         if (ret != sizeof(mem))
1679                 return -1;
1680
1681         if (ph->needs_swap)
1682                 mem = bswap_64(mem);
1683
1684         ph->env.total_mem = mem;
1685         return 0;
1686 }
1687
1688 static struct perf_evsel *
1689 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1690 {
1691         struct perf_evsel *evsel;
1692
1693         evlist__for_each_entry(evlist, evsel) {
1694                 if (evsel->idx == idx)
1695                         return evsel;
1696         }
1697
1698         return NULL;
1699 }
1700
1701 static void
1702 perf_evlist__set_event_name(struct perf_evlist *evlist,
1703                             struct perf_evsel *event)
1704 {
1705         struct perf_evsel *evsel;
1706
1707         if (!event->name)
1708                 return;
1709
1710         evsel = perf_evlist__find_by_index(evlist, event->idx);
1711         if (!evsel)
1712                 return;
1713
1714         if (evsel->name)
1715                 return;
1716
1717         evsel->name = strdup(event->name);
1718 }
1719
1720 static int
1721 process_event_desc(struct perf_file_section *section __maybe_unused,
1722                    struct perf_header *header, int fd,
1723                    void *data __maybe_unused)
1724 {
1725         struct perf_session *session;
1726         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1727
1728         if (!events)
1729                 return 0;
1730
1731         session = container_of(header, struct perf_session, header);
1732         for (evsel = events; evsel->attr.size; evsel++)
1733                 perf_evlist__set_event_name(session->evlist, evsel);
1734
1735         free_event_desc(events);
1736
1737         return 0;
1738 }
1739
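/*
 * The arguments are unpacked into a single 'cmdline' buffer with argv[]
 * pointing into it; section->size is an upper bound on the total string
 * data, so one allocation is enough.
 */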
1740 static int process_cmdline(struct perf_file_section *section,
1741                            struct perf_header *ph, int fd,
1742                            void *data __maybe_unused)
1743 {
1744         ssize_t ret;
1745         char *str, *cmdline = NULL, **argv = NULL;
1746         u32 nr, i, len = 0;
1747
1748         ret = readn(fd, &nr, sizeof(nr));
1749         if (ret != sizeof(nr))
1750                 return -1;
1751
1752         if (ph->needs_swap)
1753                 nr = bswap_32(nr);
1754
1755         ph->env.nr_cmdline = nr;
1756
1757         cmdline = zalloc(section->size + nr + 1);
1758         if (!cmdline)
1759                 return -1;
1760
1761         argv = zalloc(sizeof(char *) * (nr + 1));
1762         if (!argv)
1763                 goto error;
1764
1765         for (i = 0; i < nr; i++) {
1766                 str = do_read_string(fd, ph);
1767                 if (!str)
1768                         goto error;
1769
1770                 argv[i] = cmdline + len;
1771                 memcpy(argv[i], str, strlen(str) + 1);
1772                 len += strlen(str) + 1;
1773                 free(str);
1774         }
1775         ph->env.cmdline = cmdline;
1776         ph->env.cmdline_argv = (const char **) argv;
1777         return 0;
1778
1779 error:
1780         free(argv);
1781         free(cmdline);
1782         return -1;
1783 }
1784
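/*
 * HEADER_CPU_TOPOLOGY layout, as consumed below: a u32 count of core sibling
 * maps followed by that many strings, then a u32 count of thread sibling
 * maps followed by that many strings. Newer files append one (core_id,
 * socket_id) u32 pair per available CPU; older files stop after the sibling
 * lists, which is detected by comparing the bytes consumed so far against
 * section->size.
 */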
1785 static int process_cpu_topology(struct perf_file_section *section,
1786                                 struct perf_header *ph, int fd,
1787                                 void *data __maybe_unused)
1788 {
1789         ssize_t ret;
1790         u32 nr, i;
1791         char *str;
1792         struct strbuf sb;
1793         int cpu_nr = ph->env.nr_cpus_avail;
1794         u64 size = 0;
1795
1796         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1797         if (!ph->env.cpu)
1798                 return -1;
1799
1800         ret = readn(fd, &nr, sizeof(nr));
1801         if (ret != sizeof(nr))
1802                 goto free_cpu;
1803
1804         if (ph->needs_swap)
1805                 nr = bswap_32(nr);
1806
1807         ph->env.nr_sibling_cores = nr;
1808         size += sizeof(u32);
1809         if (strbuf_init(&sb, 128) < 0)
1810                 goto free_cpu;
1811
1812         for (i = 0; i < nr; i++) {
1813                 str = do_read_string(fd, ph);
1814                 if (!str)
1815                         goto error;
1816
1817                 /* include a NULL character at the end */
1818                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1819                         goto error;
1820                 size += string_size(str);
1821                 free(str);
1822         }
1823         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1824
1825         ret = readn(fd, &nr, sizeof(nr));
1826         if (ret != sizeof(nr))
1827                 goto error;
1828
1829         if (ph->needs_swap)
1830                 nr = bswap_32(nr);
1831
1832         ph->env.nr_sibling_threads = nr;
1833         size += sizeof(u32);
1834
1835         for (i = 0; i < nr; i++) {
1836                 str = do_read_string(fd, ph);
1837                 if (!str)
1838                         goto error;
1839
1840                 /* include a NULL character at the end */
1841                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1842                         goto error;
1843                 size += string_size(str);
1844                 free(str);
1845         }
1846         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1847
1848         /*
1849          * The header may be from old perf,
1850          * The header may be from an old perf,
1851          */
1852         if (section->size <= size) {
1853                 zfree(&ph->env.cpu);
1854                 return 0;
1855         }
1856
1857         for (i = 0; i < (u32)cpu_nr; i++) {
1858                 ret = readn(fd, &nr, sizeof(nr));
1859                 if (ret != sizeof(nr))
1860                         goto free_cpu;
1861
1862                 if (ph->needs_swap)
1863                         nr = bswap_32(nr);
1864
1865                 ph->env.cpu[i].core_id = nr;
1866
1867                 ret = readn(fd, &nr, sizeof(nr));
1868                 if (ret != sizeof(nr))
1869                         goto free_cpu;
1870
1871                 if (ph->needs_swap)
1872                         nr = bswap_32(nr);
1873
1874                 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1875                         pr_debug("socket_id number is too big. "
1876                                  "You may need to upgrade the perf tool.\n");
1877                         goto free_cpu;
1878                 }
1879
1880                 ph->env.cpu[i].socket_id = nr;
1881         }
1882
1883         return 0;
1884
1885 error:
1886         strbuf_release(&sb);
1887 free_cpu:
1888         zfree(&ph->env.cpu);
1889         return -1;
1890 }
1891
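/*
 * HEADER_NUMA_TOPOLOGY layout, as consumed below: a u32 node count, then per
 * node a u32 node id, u64 total and free memory counters, and a string
 * holding the node's CPU list, which is converted into a struct cpu_map.
 */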
1892 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1893                                  struct perf_header *ph, int fd,
1894                                  void *data __maybe_unused)
1895 {
1896         struct numa_node *nodes, *n;
1897         ssize_t ret;
1898         u32 nr, i;
1899         char *str;
1900
1901         /* nr nodes */
1902         ret = readn(fd, &nr, sizeof(nr));
1903         if (ret != sizeof(nr))
1904                 return -1;
1905
1906         if (ph->needs_swap)
1907                 nr = bswap_32(nr);
1908
1909         nodes = zalloc(sizeof(*nodes) * nr);
1910         if (!nodes)
1911                 return -ENOMEM;
1912
1913         for (i = 0; i < nr; i++) {
1914                 n = &nodes[i];
1915
1916                 /* node number */
1917                 ret = readn(fd, &n->node, sizeof(u32));
1918                 if (ret != sizeof(n->node))
1919                         goto error;
1920
1921                 ret = readn(fd, &n->mem_total, sizeof(u64));
1922                 if (ret != sizeof(u64))
1923                         goto error;
1924
1925                 ret = readn(fd, &n->mem_free, sizeof(u64));
1926                 if (ret != sizeof(u64))
1927                         goto error;
1928
1929                 if (ph->needs_swap) {
1930                         n->node      = bswap_32(n->node);
1931                         n->mem_total = bswap_64(n->mem_total);
1932                         n->mem_free  = bswap_64(n->mem_free);
1933                 }
1934
1935                 str = do_read_string(fd, ph);
1936                 if (!str)
1937                         goto error;
1938
1939                 n->map = cpu_map__new(str);
1940                 free(str);
1941                 if (!n->map)
1942                         goto error;
1944         }
1945         ph->env.nr_numa_nodes = nr;
1946         ph->env.numa_nodes = nodes;
1947         return 0;
1948
1949 error:
1950         free(nodes);
1951         return -1;
1952 }
1953
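/*
 * HEADER_PMU_MAPPINGS layout, as consumed below: a u32 number of PMUs, then
 * per PMU a u32 perf_event_attr type followed by the PMU name string. The
 * pairs are flattened into env.pmu_mappings as NUL-separated "type:name"
 * entries, and the "msr" PMU type is remembered in env.msr_pmu_type.
 */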
1954 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1955                                 struct perf_header *ph, int fd,
1956                                 void *data __maybe_unused)
1957 {
1958         ssize_t ret;
1959         char *name;
1960         u32 pmu_num;
1961         u32 type;
1962         struct strbuf sb;
1963
1964         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1965         if (ret != sizeof(pmu_num))
1966                 return -1;
1967
1968         if (ph->needs_swap)
1969                 pmu_num = bswap_32(pmu_num);
1970
1971         if (!pmu_num) {
1972                 pr_debug("pmu mappings not available\n");
1973                 return 0;
1974         }
1975
1976         ph->env.nr_pmu_mappings = pmu_num;
1977         if (strbuf_init(&sb, 128) < 0)
1978                 return -1;
1979
1980         while (pmu_num) {
1981                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1982                         goto error;
1983                 if (ph->needs_swap)
1984                         type = bswap_32(type);
1985
1986                 name = do_read_string(fd, ph);
1987                 if (!name)
1988                         goto error;
1989
1990                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1991                         goto error;
1992                 /* include a NULL character at the end */
1993                 if (strbuf_add(&sb, "", 1) < 0)
1994                         goto error;
1995
1996                 if (!strcmp(name, "msr"))
1997                         ph->env.msr_pmu_type = type;
1998
1999                 free(name);
2000                 pmu_num--;
2001         }
2002         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2003         return 0;
2004
2005 error:
2006         strbuf_release(&sb);
2007         return -1;
2008 }
2009
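/*
 * HEADER_GROUP_DESC layout, as consumed below: a u32 group count, then per
 * group a name string, a u32 leader index and a u32 member count. The
 * descriptors are used to rebuild evsel->leader, evsel->group_name and
 * evsel->nr_members in the session's evlist.
 */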
2010 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2011                               struct perf_header *ph, int fd,
2012                               void *data __maybe_unused)
2013 {
2014         int ret = -1;
2015         u32 i, nr, nr_groups;
2016         struct perf_session *session;
2017         struct perf_evsel *evsel, *leader = NULL;
2018         struct group_desc {
2019                 char *name;
2020                 u32 leader_idx;
2021                 u32 nr_members;
2022         } *desc;
2023
2024         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2025                 return -1;
2026
2027         if (ph->needs_swap)
2028                 nr_groups = bswap_32(nr_groups);
2029
2030         ph->env.nr_groups = nr_groups;
2031         if (!nr_groups) {
2032                 pr_debug("group desc not available\n");
2033                 return 0;
2034         }
2035
2036         desc = calloc(nr_groups, sizeof(*desc));
2037         if (!desc)
2038                 return -1;
2039
2040         for (i = 0; i < nr_groups; i++) {
2041                 desc[i].name = do_read_string(fd, ph);
2042                 if (!desc[i].name)
2043                         goto out_free;
2044
2045                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2046                         goto out_free;
2047
2048                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2049                         goto out_free;
2050
2051                 if (ph->needs_swap) {
2052                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2053                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2054                 }
2055         }
2056
2057         /*
2058          * Rebuild group relationship based on the group_desc
2059          */
2060         session = container_of(ph, struct perf_session, header);
2061         session->evlist->nr_groups = nr_groups;
2062
2063         i = nr = 0;
2064         evlist__for_each_entry(session->evlist, evsel) {
2065                 if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
2066                         evsel->leader = evsel;
2067                         /* {anon_group} is a dummy name */
2068                         if (strcmp(desc[i].name, "{anon_group}")) {
2069                                 evsel->group_name = desc[i].name;
2070                                 desc[i].name = NULL;
2071                         }
2072                         evsel->nr_members = desc[i].nr_members;
2073
2074                         if (i >= nr_groups || nr > 0) {
2075                                 pr_debug("invalid group desc\n");
2076                                 goto out_free;
2077                         }
2078
2079                         leader = evsel;
2080                         nr = evsel->nr_members - 1;
2081                         i++;
2082                 } else if (nr) {
2083                         /* This is a group member */
2084                         evsel->leader = leader;
2085
2086                         nr--;
2087                 }
2088         }
2089
2090         if (i != nr_groups || nr != 0) {
2091                 pr_debug("invalid group desc\n");
2092                 goto out_free;
2093         }
2094
2095         ret = 0;
2096 out_free:
2097         for (i = 0; i < nr_groups; i++)
2098                 zfree(&desc[i].name);
2099         free(desc);
2100
2101         return ret;
2102 }
2103
2104 static int process_auxtrace(struct perf_file_section *section,
2105                             struct perf_header *ph, int fd,
2106                             void *data __maybe_unused)
2107 {
2108         struct perf_session *session;
2109         int err;
2110
2111         session = container_of(ph, struct perf_session, header);
2112
2113         err = auxtrace_index__process(fd, section->size, session,
2114                                       ph->needs_swap);
2115         if (err < 0)
2116                 pr_err("Failed to process auxtrace index\n");
2117         return err;
2118 }
2119
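/*
 * HEADER_CACHE layout, as consumed below: a u32 version (only 1 is
 * accepted) and a u32 cache count, then per cache four u32 values (level,
 * line_size, sets, ways) followed by three strings (type, size, map).
 */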
2120 static int process_cache(struct perf_file_section *section __maybe_unused,
2121                          struct perf_header *ph, int fd,
2122                          void *data __maybe_unused)
2123 {
2124         struct cpu_cache_level *caches;
2125         u32 cnt, i, version;
2126
2127         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2128                 return -1;
2129
2130         if (ph->needs_swap)
2131                 version = bswap_32(version);
2132
2133         if (version != 1)
2134                 return -1;
2135
2136         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2137                 return -1;
2138
2139         if (ph->needs_swap)
2140                 cnt = bswap_32(cnt);
2141
2142         caches = zalloc(sizeof(*caches) * cnt);
2143         if (!caches)
2144                 return -1;
2145
2146         for (i = 0; i < cnt; i++) {
2147                 struct cpu_cache_level c;
2148
2149                 #define _R(v)                                           \
2150                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2151                                 goto out_free_caches;                   \
2152                         if (ph->needs_swap)                             \
2153                                 c.v = bswap_32(c.v);                    \
2154
2155                 _R(level)
2156                 _R(line_size)
2157                 _R(sets)
2158                 _R(ways)
2159                 #undef _R
2160
2161                 #define _R(v)                           \
2162                         c.v = do_read_string(fd, ph);   \
2163                         if (!c.v)                       \
2164                                 goto out_free_caches;
2165
2166                 _R(type)
2167                 _R(size)
2168                 _R(map)
2169                 #undef _R
2170
2171                 caches[i] = c;
2172         }
2173
2174         ph->env.caches = caches;
2175         ph->env.caches_cnt = cnt;
2176         return 0;
2177 out_free_caches:
2178         free(caches);
2179         return -1;
2180 }
2181
2182 struct feature_ops {
2183         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2184         void (*print)(struct perf_header *h, int fd, FILE *fp);
2185         int (*process)(struct perf_file_section *section,
2186                        struct perf_header *h, int fd, void *data);
2187         const char *name;
2188         bool full_only;
2189 };
2190
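/*
 * FEAT_OPA: feature with write/print handlers only.
 * FEAT_OPP: also has a process handler to read the feature back.
 * FEAT_OPF: like FEAT_OPP, but only printed with the full header dump (-I).
 */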
2191 #define FEAT_OPA(n, func) \
2192         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2193 #define FEAT_OPP(n, func) \
2194         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2195                 .process = process_##func }
2196 #define FEAT_OPF(n, func) \
2197         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2198                 .process = process_##func, .full_only = true }
2199
2200 /* feature_ops not implemented: */
2201 #define print_tracing_data      NULL
2202 #define print_build_id          NULL
2203
2204 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2205         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2206         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2207         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2208         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2209         FEAT_OPP(HEADER_VERSION,        version),
2210         FEAT_OPP(HEADER_ARCH,           arch),
2211         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2212         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2213         FEAT_OPP(HEADER_CPUID,          cpuid),
2214         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2215         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2216         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2217         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2218         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2219         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2220         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2221         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2222         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2223         FEAT_OPA(HEADER_STAT,           stat),
2224         FEAT_OPF(HEADER_CACHE,          cache),
2225 };
2226
2227 struct header_print_data {
2228         FILE *fp;
2229         bool full; /* extended list of headers */
2230 };
2231
2232 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2233                                            struct perf_header *ph,
2234                                            int feat, int fd, void *data)
2235 {
2236         struct header_print_data *hd = data;
2237
2238         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2239                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2240                                 "%d, continuing...\n", section->offset, feat);
2241                 return 0;
2242         }
2243         if (feat >= HEADER_LAST_FEATURE) {
2244                 pr_warning("unknown feature %d\n", feat);
2245                 return 0;
2246         }
2247         if (!feat_ops[feat].print)
2248                 return 0;
2249
2250         if (!feat_ops[feat].full_only || hd->full)
2251                 feat_ops[feat].print(ph, fd, hd->fp);
2252         else
2253                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2254                         feat_ops[feat].name);
2255
2256         return 0;
2257 }
2258
2259 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2260 {
2261         struct header_print_data hd;
2262         struct perf_header *header = &session->header;
2263         int fd = perf_data_file__fd(session->file);
2264         struct stat st;
2265         int ret, bit;
2266
2267         hd.fp = fp;
2268         hd.full = full;
2269
2270         ret = fstat(fd, &st);
2271         if (ret == -1)
2272                 return -1;
2273
2274         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2275
2276         perf_header__process_sections(header, fd, &hd,
2277                                       perf_file_section__fprintf_info);
2278
2279         if (session->file->is_pipe)
2280                 return 0;
2281
2282         fprintf(fp, "# missing features: ");
2283         for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2284                 if (bit)
2285                         fprintf(fp, "%s ", feat_ops[bit].name);
2286         }
2287
2288         fprintf(fp, "\n");
2289         return 0;
2290 }
2291
2292 static int do_write_feat(int fd, struct perf_header *h, int type,
2293                          struct perf_file_section **p,
2294                          struct perf_evlist *evlist)
2295 {
2296         int err;
2297         int ret = 0;
2298
2299         if (perf_header__has_feat(h, type)) {
2300                 if (!feat_ops[type].write)
2301                         return -1;
2302
2303                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2304
2305                 err = feat_ops[type].write(fd, h, evlist);
2306                 if (err < 0) {
2307                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2308
2309                         /* undo anything written */
2310                         lseek(fd, (*p)->offset, SEEK_SET);
2311
2312                         return -1;
2313                 }
2314                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2315                 (*p)++;
2316         }
2317         return ret;
2318 }
2319
2320 static int perf_header__adds_write(struct perf_header *header,
2321                                    struct perf_evlist *evlist, int fd)
2322 {
2323         int nr_sections;
2324         struct perf_file_section *feat_sec, *p;
2325         int sec_size;
2326         u64 sec_start;
2327         int feat;
2328         int err;
2329
2330         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2331         if (!nr_sections)
2332                 return 0;
2333
2334         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2335         if (feat_sec == NULL)
2336                 return -ENOMEM;
2337
2338         sec_size = sizeof(*feat_sec) * nr_sections;
2339
2340         sec_start = header->feat_offset;
2341         lseek(fd, sec_start + sec_size, SEEK_SET);
2342
2343         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2344                 if (do_write_feat(fd, header, feat, &p, evlist))
2345                         perf_header__clear_feat(header, feat);
2346         }
2347
2348         lseek(fd, sec_start, SEEK_SET);
2349         /*
2350          * may write more than needed due to dropped features, but
2351          * this is okay, the reader will skip the missing entries
2352          */
2353         err = do_write(fd, feat_sec, sec_size);
2354         if (err < 0)
2355                 pr_debug("failed to write feature section\n");
2356         free(feat_sec);
2357         return err;
2358 }
2359
2360 int perf_header__write_pipe(int fd)
2361 {
2362         struct perf_pipe_file_header f_header;
2363         int err;
2364
2365         f_header = (struct perf_pipe_file_header){
2366                 .magic     = PERF_MAGIC,
2367                 .size      = sizeof(f_header),
2368         };
2369
2370         err = do_write(fd, &f_header, sizeof(f_header));
2371         if (err < 0) {
2372                 pr_debug("failed to write perf pipe header\n");
2373                 return err;
2374         }
2375
2376         return 0;
2377 }
2378
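/*
 * On-disk layout written below: the perf_file_header (rewritten at offset 0
 * at the end), the per-evsel sample id arrays, the perf_file_attr array, the
 * data section, and, when called at exit, the feature sections whose
 * descriptors start at feat_offset, right after the data section.
 */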
2379 int perf_session__write_header(struct perf_session *session,
2380                                struct perf_evlist *evlist,
2381                                int fd, bool at_exit)
2382 {
2383         struct perf_file_header f_header;
2384         struct perf_file_attr   f_attr;
2385         struct perf_header *header = &session->header;
2386         struct perf_evsel *evsel;
2387         u64 attr_offset;
2388         int err;
2389
2390         lseek(fd, sizeof(f_header), SEEK_SET);
2391
2392         evlist__for_each_entry(session->evlist, evsel) {
2393                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2394                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2395                 if (err < 0) {
2396                         pr_debug("failed to write perf header\n");
2397                         return err;
2398                 }
2399         }
2400
2401         attr_offset = lseek(fd, 0, SEEK_CUR);
2402
2403         evlist__for_each_entry(evlist, evsel) {
2404                 f_attr = (struct perf_file_attr){
2405                         .attr = evsel->attr,
2406                         .ids  = {
2407                                 .offset = evsel->id_offset,
2408                                 .size   = evsel->ids * sizeof(u64),
2409                         }
2410                 };
2411                 err = do_write(fd, &f_attr, sizeof(f_attr));
2412                 if (err < 0) {
2413                         pr_debug("failed to write perf header attribute\n");
2414                         return err;
2415                 }
2416         }
2417
2418         if (!header->data_offset)
2419                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2420         header->feat_offset = header->data_offset + header->data_size;
2421
2422         if (at_exit) {
2423                 err = perf_header__adds_write(header, evlist, fd);
2424                 if (err < 0)
2425                         return err;
2426         }
2427
2428         f_header = (struct perf_file_header){
2429                 .magic     = PERF_MAGIC,
2430                 .size      = sizeof(f_header),
2431                 .attr_size = sizeof(f_attr),
2432                 .attrs = {
2433                         .offset = attr_offset,
2434                         .size   = evlist->nr_entries * sizeof(f_attr),
2435                 },
2436                 .data = {
2437                         .offset = header->data_offset,
2438                         .size   = header->data_size,
2439                 },
2440                 /* event_types is ignored, store zeros */
2441         };
2442
2443         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2444
2445         lseek(fd, 0, SEEK_SET);
2446         err = do_write(fd, &f_header, sizeof(f_header));
2447         if (err < 0) {
2448                 pr_debug("failed to write perf header\n");
2449                 return err;
2450         }
2451         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2452
2453         return 0;
2454 }
2455
2456 static int perf_header__getbuffer64(struct perf_header *header,
2457                                     int fd, void *buf, size_t size)
2458 {
2459         if (readn(fd, buf, size) <= 0)
2460                 return -1;
2461
2462         if (header->needs_swap)
2463                 mem_bswap_64(buf, size);
2464
2465         return 0;
2466 }
2467
2468 int perf_header__process_sections(struct perf_header *header, int fd,
2469                                   void *data,
2470                                   int (*process)(struct perf_file_section *section,
2471                                                  struct perf_header *ph,
2472                                                  int feat, int fd, void *data))
2473 {
2474         struct perf_file_section *feat_sec, *sec;
2475         int nr_sections;
2476         int sec_size;
2477         int feat;
2478         int err;
2479
2480         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2481         if (!nr_sections)
2482                 return 0;
2483
2484         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2485         if (!feat_sec)
2486                 return -1;
2487
2488         sec_size = sizeof(*feat_sec) * nr_sections;
2489
2490         lseek(fd, header->feat_offset, SEEK_SET);
2491
2492         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2493         if (err < 0)
2494                 goto out_free;
2495
2496         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2497                 err = process(sec++, header, feat, fd, data);
2498                 if (err < 0)
2499                         goto out_free;
2500         }
2501         err = 0;
2502 out_free:
2503         free(feat_sec);
2504         return err;
2505 }
2506
2507 static const int attr_file_abi_sizes[] = {
2508         [0] = PERF_ATTR_SIZE_VER0,
2509         [1] = PERF_ATTR_SIZE_VER1,
2510         [2] = PERF_ATTR_SIZE_VER2,
2511         [3] = PERF_ATTR_SIZE_VER3,
2512         [4] = PERF_ATTR_SIZE_VER4,
2513         0,
2514 };
2515
2516 /*
2517  * In the legacy file format, the magic number does not encode endianness;
2518  * hdr_sz was used for that instead. But since hdr_sz varies with the ABI
2519  * revision, we have to try every known header size in both byte orders to
2520  * detect the endianness.
2521  */
2522 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2523 {
2524         uint64_t ref_size, attr_size;
2525         int i;
2526
2527         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2528                 ref_size = attr_file_abi_sizes[i]
2529                          + sizeof(struct perf_file_section);
2530                 if (hdr_sz != ref_size) {
2531                         attr_size = bswap_64(hdr_sz);
2532                         if (attr_size != ref_size)
2533                                 continue;
2534
2535                         ph->needs_swap = true;
2536                 }
2537                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2538                          i,
2539                          ph->needs_swap);
2540                 return 0;
2541         }
2542         /* could not determine endianness */
2543         return -1;
2544 }
2545
2546 #define PERF_PIPE_HDR_VER0      16
2547
2548 static const size_t attr_pipe_abi_sizes[] = {
2549         [0] = PERF_PIPE_HDR_VER0,
2550         0,
2551 };
2552
2553 /*
2554  * In the legacy pipe format, there is an implicit assumption that the
2555  * endianness of the host recording the samples and of the host parsing them
2556  * is the same. This is not always the case, since the pipe output may be
2557  * redirected into a file and analyzed on a different machine with a
2558  * different endianness and a different perf_event ABI revision in perf itself.
2559  */
2560 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2561 {
2562         u64 attr_size;
2563         int i;
2564
2565         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2566                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2567                         attr_size = bswap_64(hdr_sz);
2568                         if (attr_size != hdr_sz)
2569                                 continue;
2570
2571                         ph->needs_swap = true;
2572                 }
2573                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2574                 return 0;
2575         }
2576         return -1;
2577 }
2578
2579 bool is_perf_magic(u64 magic)
2580 {
2581         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2582                 || magic == __perf_magic2
2583                 || magic == __perf_magic2_sw)
2584                 return true;
2585
2586         return false;
2587 }
2588
2589 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2590                               bool is_pipe, struct perf_header *ph)
2591 {
2592         int ret;
2593
2594         /* check for legacy format */
2595         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2596         if (ret == 0) {
2597                 ph->version = PERF_HEADER_VERSION_1;
2598                 pr_debug("legacy perf.data format\n");
2599                 if (is_pipe)
2600                         return try_all_pipe_abis(hdr_sz, ph);
2601
2602                 return try_all_file_abis(hdr_sz, ph);
2603         }
2604         /*
2605          * the new magic number serves two purposes:
2606          * - unique number to identify actual perf.data files
2607          * - encode endianness of file
2608          */
2609         ph->version = PERF_HEADER_VERSION_2;
2610
2611         /* check magic number with one endianness */
2612         if (magic == __perf_magic2)
2613                 return 0;
2614
2615         /* check magic number with opposite endianness */
2616         if (magic != __perf_magic2_sw)
2617                 return -1;
2618
2619         ph->needs_swap = true;
2620
2621         return 0;
2622 }
2623
2624 int perf_file_header__read(struct perf_file_header *header,
2625                            struct perf_header *ph, int fd)
2626 {
2627         ssize_t ret;
2628
2629         lseek(fd, 0, SEEK_SET);
2630
2631         ret = readn(fd, header, sizeof(*header));
2632         if (ret <= 0)
2633                 return -1;
2634
2635         if (check_magic_endian(header->magic,
2636                                header->attr_size, false, ph) < 0) {
2637                 pr_debug("magic/endian check failed\n");
2638                 return -1;
2639         }
2640
2641         if (ph->needs_swap) {
2642                 mem_bswap_64(header, offsetof(struct perf_file_header,
2643                              adds_features));
2644         }
2645
2646         if (header->size != sizeof(*header)) {
2647                 /* Support the previous format */
2648                 if (header->size == offsetof(typeof(*header), adds_features))
2649                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2650                 else
2651                         return -1;
2652         } else if (ph->needs_swap) {
2653                 /*
2654                  * feature bitmap is declared as an array of unsigned longs --
2655                  * not good since its size can differ between the host that
2656                  * generated the data file and the host analyzing the file.
2657                  *
2658                  * We need to handle endianness, but we don't know the size of
2659                  * the unsigned long where the file was generated. Take a best
2660                  * guess at determining it: try 64-bit swap first (ie., file
2661                  * created on a 64-bit host), and check if the hostname feature
2662                  * bit is set (this feature bit is forced on as of fbe96f2).
2663                  * If the bit is not set, undo the 64-bit swap and try a 32-bit
2664                  * swap. If the hostname bit is still not set (e.g., older data
2665                  * file), punt and fall back to the original behavior:
2666                  * clearing all feature bits and setting only the build-id bit.
2667                  */
2668                 mem_bswap_64(&header->adds_features,
2669                             BITS_TO_U64(HEADER_FEAT_BITS));
2670
2671                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2672                         /* unswap as u64 */
2673                         mem_bswap_64(&header->adds_features,
2674                                     BITS_TO_U64(HEADER_FEAT_BITS));
2675
2676                         /* unswap as u32 */
2677                         mem_bswap_32(&header->adds_features,
2678                                     BITS_TO_U32(HEADER_FEAT_BITS));
2679                 }
2680
2681                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2682                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2683                         set_bit(HEADER_BUILD_ID, header->adds_features);
2684                 }
2685         }
2686
2687         memcpy(&ph->adds_features, &header->adds_features,
2688                sizeof(ph->adds_features));
2689
2690         ph->data_offset  = header->data.offset;
2691         ph->data_size    = header->data.size;
2692         ph->feat_offset  = header->data.offset + header->data.size;
2693         return 0;
2694 }
2695
2696 static int perf_file_section__process(struct perf_file_section *section,
2697                                       struct perf_header *ph,
2698                                       int feat, int fd, void *data)
2699 {
2700         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2701                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2702                           "%d, continuing...\n", section->offset, feat);
2703                 return 0;
2704         }
2705
2706         if (feat >= HEADER_LAST_FEATURE) {
2707                 pr_debug("unknown feature %d, continuing...\n", feat);
2708                 return 0;
2709         }
2710
2711         if (!feat_ops[feat].process)
2712                 return 0;
2713
2714         return feat_ops[feat].process(section, ph, fd, data);
2715 }
2716
2717 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2718                                        struct perf_header *ph, int fd,
2719                                        bool repipe)
2720 {
2721         ssize_t ret;
2722
2723         ret = readn(fd, header, sizeof(*header));
2724         if (ret <= 0)
2725                 return -1;
2726
2727         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2728                 pr_debug("endian/magic failed\n");
2729                 return -1;
2730         }
2731
2732         if (ph->needs_swap)
2733                 header->size = bswap_64(header->size);
2734
2735         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2736                 return -1;
2737
2738         return 0;
2739 }
2740
2741 static int perf_header__read_pipe(struct perf_session *session)
2742 {
2743         struct perf_header *header = &session->header;
2744         struct perf_pipe_file_header f_header;
2745
2746         if (perf_file_header__read_pipe(&f_header, header,
2747                                         perf_data_file__fd(session->file),
2748                                         session->repipe) < 0) {
2749                 pr_debug("incompatible file format\n");
2750                 return -EINVAL;
2751         }
2752
2753         return 0;
2754 }
2755
2756 static int read_attr(int fd, struct perf_header *ph,
2757                      struct perf_file_attr *f_attr)
2758 {
2759         struct perf_event_attr *attr = &f_attr->attr;
2760         size_t sz, left;
2761         size_t our_sz = sizeof(f_attr->attr);
2762         ssize_t ret;
2763
2764         memset(f_attr, 0, sizeof(*f_attr));
2765
2766         /* read minimal guaranteed structure */
2767         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2768         if (ret <= 0) {
2769                 pr_debug("cannot read %d bytes of header attr\n",
2770                          PERF_ATTR_SIZE_VER0);
2771                 return -1;
2772         }
2773
2774         /* on file perf_event_attr size */
2775         sz = attr->size;
2776
2777         if (ph->needs_swap)
2778                 sz = bswap_32(sz);
2779
2780         if (sz == 0) {
2781                 /* assume ABI0 */
2782                 sz =  PERF_ATTR_SIZE_VER0;
2783         } else if (sz > our_sz) {
2784                 pr_debug("file uses a more recent and unsupported ABI"
2785                          " (%zu bytes extra)\n", sz - our_sz);
2786                 return -1;
2787         }
2788         /* what we have not yet read and that we know about */
2789         left = sz - PERF_ATTR_SIZE_VER0;
2790         if (left) {
2791                 void *ptr = attr;
2792                 ptr += PERF_ATTR_SIZE_VER0;
2793
2794                 ret = readn(fd, ptr, left);
2795         }
2796         /* read perf_file_section, ids are read in caller */
2797         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2798
2799         return ret <= 0 ? -1 : 0;
2800 }
2801
2802 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2803                                                 struct pevent *pevent)
2804 {
2805         struct event_format *event;
2806         char bf[128];
2807
2808         /* already prepared */
2809         if (evsel->tp_format)
2810                 return 0;
2811
2812         if (pevent == NULL) {
2813                 pr_debug("broken or missing trace data\n");
2814                 return -1;
2815         }
2816
2817         event = pevent_find_event(pevent, evsel->attr.config);
2818         if (event == NULL) {
2819                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2820                 return -1;
2821         }
2822
2823         if (!evsel->name) {
2824                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2825                 evsel->name = strdup(bf);
2826                 if (evsel->name == NULL)
2827                         return -1;
2828         }
2829
2830         evsel->tp_format = event;
2831         return 0;
2832 }
2833
2834 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2835                                                   struct pevent *pevent)
2836 {
2837         struct perf_evsel *pos;
2838
2839         evlist__for_each_entry(evlist, pos) {
2840                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2841                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2842                         return -1;
2843         }
2844
2845         return 0;
2846 }
2847
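/*
 * Entry point for reading a perf.data header: read the file header (or hand
 * off to the pipe path), then one perf_file_attr plus its sample ids per
 * event, and finally process the optional feature sections via
 * perf_file_section__process().
 */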
2848 int perf_session__read_header(struct perf_session *session)
2849 {
2850         struct perf_data_file *file = session->file;
2851         struct perf_header *header = &session->header;
2852         struct perf_file_header f_header;
2853         struct perf_file_attr   f_attr;
2854         u64                     f_id;
2855         int nr_attrs, nr_ids, i, j;
2856         int fd = perf_data_file__fd(file);
2857
2858         session->evlist = perf_evlist__new();
2859         if (session->evlist == NULL)
2860                 return -ENOMEM;
2861
2862         session->evlist->env = &header->env;
2863         session->machines.host.env = &header->env;
2864         if (perf_data_file__is_pipe(file))
2865                 return perf_header__read_pipe(session);
2866
2867         if (perf_file_header__read(&f_header, header, fd) < 0)
2868                 return -EINVAL;
2869
2870         /*
2871          * Sanity check that perf.data was written cleanly; data size is
2872          * initialized to 0 and updated only if the on_exit function is run.
2873          * If data size is still 0 then the file contains only partial
2874          * information.  Just warn the user and process as much of it as possible.
2875          */
2876         if (f_header.data.size == 0) {
2877                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2878                            "Was the 'perf record' command properly terminated?\n",
2879                            file->path);
2880         }
2881
2882         nr_attrs = f_header.attrs.size / f_header.attr_size;
2883         lseek(fd, f_header.attrs.offset, SEEK_SET);
2884
2885         for (i = 0; i < nr_attrs; i++) {
2886                 struct perf_evsel *evsel;
2887                 off_t tmp;
2888
2889                 if (read_attr(fd, header, &f_attr) < 0)
2890                         goto out_errno;
2891
2892                 if (header->needs_swap) {
2893                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2894                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2895                         perf_event__attr_swap(&f_attr.attr);
2896                 }
2897
2898                 tmp = lseek(fd, 0, SEEK_CUR);
2899                 evsel = perf_evsel__new(&f_attr.attr);
2900
2901                 if (evsel == NULL)
2902                         goto out_delete_evlist;
2903
2904                 evsel->needs_swap = header->needs_swap;
2905                 /*
2906                  * Do it before so that if perf_evsel__alloc_id fails, this
2907                  * entry gets purged too at perf_evlist__delete().
2908                  */
2909                 perf_evlist__add(session->evlist, evsel);
2910
2911                 nr_ids = f_attr.ids.size / sizeof(u64);
2912                 /*
2913                  * We don't have the cpu and thread maps in the header, so
2914                  * for allocating the perf_sample_id table we fake 1 cpu and
2915                  * nr_ids threads.
2916                  */
2917                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2918                         goto out_delete_evlist;
2919
2920                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2921
2922                 for (j = 0; j < nr_ids; j++) {
2923                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2924                                 goto out_errno;
2925
2926                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2927                 }
2928
2929                 lseek(fd, tmp, SEEK_SET);
2930         }
2931
2932         symbol_conf.nr_events = nr_attrs;
2933
2934         perf_header__process_sections(header, fd, &session->tevent,
2935                                       perf_file_section__process);
2936
2937         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2938                                                    session->tevent.pevent))
2939                 goto out_delete_evlist;
2940
2941         return 0;
2942 out_errno:
2943         return -errno;
2944
2945 out_delete_evlist:
2946         perf_evlist__delete(session->evlist);
2947         session->evlist = NULL;
2948         return -ENOMEM;
2949 }
2950
2951 int perf_event__synthesize_attr(struct perf_tool *tool,
2952                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2953                                 perf_event__handler_t process)
2954 {
2955         union perf_event *ev;
2956         size_t size;
2957         int err;
2958
2959         size = sizeof(struct perf_event_attr);
2960         size = PERF_ALIGN(size, sizeof(u64));
2961         size += sizeof(struct perf_event_header);
2962         size += ids * sizeof(u64);
2963
2964         ev = malloc(size);
2965
2966         if (ev == NULL)
2967                 return -ENOMEM;
2968
2969         ev->attr.attr = *attr;
2970         memcpy(ev->attr.id, id, ids * sizeof(u64));
2971
2972         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2973         ev->attr.header.size = (u16)size;
2974
2975         if (ev->attr.header.size == size)
2976                 err = process(tool, ev, NULL, NULL);
2977         else
2978                 err = -E2BIG;
2979
2980         free(ev);
2981
2982         return err;
2983 }
2984
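/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for 'size'
 * bytes of payload in ev->data (u64 aligned), for the given update type and
 * event id.
 */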
2985 static struct event_update_event *
2986 event_update_event__new(size_t size, u64 type, u64 id)
2987 {
2988         struct event_update_event *ev;
2989
2990         size += sizeof(*ev);
2991         size  = PERF_ALIGN(size, sizeof(u64));
2992
2993         ev = zalloc(size);
2994         if (ev) {
2995                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2996                 ev->header.size = (u16)size;
2997                 ev->type = type;
2998                 ev->id = id;
2999         }
3000         return ev;
3001 }
3002
3003 int
3004 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3005                                          struct perf_evsel *evsel,
3006                                          perf_event__handler_t process)
3007 {
3008         struct event_update_event *ev;
3009         size_t size = strlen(evsel->unit);
3010         int err;
3011
3012         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3013         if (ev == NULL)
3014                 return -ENOMEM;
3015
3016         strncpy(ev->data, evsel->unit, size);
3017         err = process(tool, (union perf_event *)ev, NULL, NULL);
3018         free(ev);
3019         return err;
3020 }
3021
3022 int
3023 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3024                                           struct perf_evsel *evsel,
3025                                           perf_event__handler_t process)
3026 {
3027         struct event_update_event *ev;
3028         struct event_update_event_scale *ev_data;
3029         int err;
3030
3031         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3032         if (ev == NULL)
3033                 return -ENOMEM;
3034
3035         ev_data = (struct event_update_event_scale *) ev->data;
3036         ev_data->scale = evsel->scale;
3037         err = process(tool, (union perf_event*) ev, NULL, NULL);
3038         free(ev);
3039         return err;
3040 }
3041
3042 int
3043 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3044                                          struct perf_evsel *evsel,
3045                                          perf_event__handler_t process)
3046 {
3047         struct event_update_event *ev;
3048         size_t len = strlen(evsel->name);
3049         int err;
3050
3051         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3052         if (ev == NULL)
3053                 return -ENOMEM;
3054
3055         strncpy(ev->data, evsel->name, len);
3056         err = process(tool, (union perf_event*) ev, NULL, NULL);
3057         free(ev);
3058         return err;
3059 }
3060
3061 int
3062 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3063                                         struct perf_evsel *evsel,
3064                                         perf_event__handler_t process)
3065 {
3066         size_t size = sizeof(struct event_update_event);
3067         struct event_update_event *ev;
3068         int max, err;
3069         u16 type;
3070
3071         if (!evsel->own_cpus)
3072                 return 0;
3073
3074         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3075         if (!ev)
3076                 return -ENOMEM;
3077
3078         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3079         ev->header.size = (u16)size;
3080         ev->type = PERF_EVENT_UPDATE__CPUS;
3081         ev->id   = evsel->id[0];
3082
3083         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3084                                  evsel->own_cpus,
3085                                  type, max);
3086
3087         err = process(tool, (union perf_event*) ev, NULL, NULL);
3088         free(ev);
3089         return err;
3090 }
3091
3092 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3093 {
3094         struct event_update_event *ev = &event->event_update;
3095         struct event_update_event_scale *ev_scale;
3096         struct event_update_event_cpus *ev_cpus;
3097         struct cpu_map *map;
3098         size_t ret;
3099
3100         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3101
3102         switch (ev->type) {
3103         case PERF_EVENT_UPDATE__SCALE:
3104                 ev_scale = (struct event_update_event_scale *) ev->data;
3105                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3106                 break;
3107         case PERF_EVENT_UPDATE__UNIT:
3108                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3109                 break;
3110         case PERF_EVENT_UPDATE__NAME:
3111                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3112                 break;
3113         case PERF_EVENT_UPDATE__CPUS:
3114                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3115                 ret += fprintf(fp, "... ");
3116
3117                 map = cpu_map__new_data(&ev_cpus->cpus);
3118                 if (map)
3119                         ret += cpu_map__fprintf(map, fp);
3120                 else
3121                         ret += fprintf(fp, "failed to get cpus\n");
3122                 break;
3123         default:
3124                 ret += fprintf(fp, "... unknown type\n");
3125                 break;
3126         }
3127
3128         return ret;
3129 }
3130
3131 int perf_event__synthesize_attrs(struct perf_tool *tool,
3132                                    struct perf_session *session,
3133                                    perf_event__handler_t process)
3134 {
3135         struct perf_evsel *evsel;
3136         int err = 0;
3137
3138         evlist__for_each_entry(session->evlist, evsel) {
3139                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3140                                                   evsel->id, process);
3141                 if (err) {
3142                         pr_debug("failed to create perf header attribute\n");
3143                         return err;
3144                 }
3145         }
3146
3147         return err;
3148 }
3149
3150 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3151                              union perf_event *event,
3152                              struct perf_evlist **pevlist)
3153 {
3154         u32 i, ids, n_ids;
3155         struct perf_evsel *evsel;
3156         struct perf_evlist *evlist = *pevlist;
3157
3158         if (evlist == NULL) {
3159                 *pevlist = evlist = perf_evlist__new();
3160                 if (evlist == NULL)
3161                         return -ENOMEM;
3162         }
3163
3164         evsel = perf_evsel__new(&event->attr.attr);
3165         if (evsel == NULL)
3166                 return -ENOMEM;
3167
3168         perf_evlist__add(evlist, evsel);
3169
3170         ids = event->header.size;
3171         ids -= (void *)&event->attr.id - (void *)event;
3172         n_ids = ids / sizeof(u64);
3173         /*
3174          * We don't have the cpu and thread maps in the header, so
3175          * for allocating the perf_sample_id table we fake 1 cpu and
3176          * n_ids threads.
3177          */
3178         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3179                 return -ENOMEM;
3180
3181         for (i = 0; i < n_ids; i++) {
3182                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3183         }
3184
3185         symbol_conf.nr_events = evlist->nr_entries;
3186
3187         return 0;
3188 }
3189
3190 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3191                                      union perf_event *event,
3192                                      struct perf_evlist **pevlist)
3193 {
3194         struct event_update_event *ev = &event->event_update;
3195         struct event_update_event_scale *ev_scale;
3196         struct event_update_event_cpus *ev_cpus;
3197         struct perf_evlist *evlist;
3198         struct perf_evsel *evsel;
3199         struct cpu_map *map;
3200
3201         if (!pevlist || *pevlist == NULL)
3202                 return -EINVAL;
3203
3204         evlist = *pevlist;
3205
3206         evsel = perf_evlist__id2evsel(evlist, ev->id);
3207         if (evsel == NULL)
3208                 return -EINVAL;
3209
3210         switch (ev->type) {
3211         case PERF_EVENT_UPDATE__UNIT:
3212                 evsel->unit = strdup(ev->data);
3213                 break;
3214         case PERF_EVENT_UPDATE__NAME:
3215                 evsel->name = strdup(ev->data);
3216                 break;
3217         case PERF_EVENT_UPDATE__SCALE:
3218                 ev_scale = (struct event_update_event_scale *) ev->data;
3219                 evsel->scale = ev_scale->scale;
3220                 break;
3221         case PERF_EVENT_UPDATE__CPUS:
3222                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3223
3224                 map = cpu_map__new_data(&ev_cpus->cpus);
3225                 if (map)
3226                         evsel->own_cpus = map;
3227                 else
3228                         pr_err("failed to get event_update cpus\n");
                break;
3229         default:
3230                 break;
3231         }
3232
3233         return 0;
3234 }
3235
3236 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3237                                         struct perf_evlist *evlist,
3238                                         perf_event__handler_t process)
3239 {
3240         union perf_event ev;
3241         struct tracing_data *tdata;
3242         ssize_t size = 0, aligned_size = 0, padding;
3243         int err __maybe_unused = 0;
3244
3245         /*
3246          * We are going to store the size of the data followed
3247          * by the data contents. Since the fd descriptor is a pipe,
3248          * we cannot seek back to store the size of the data once
3249          * we know it. Instead we:
3250          *
3251          * - write the tracing data to the temp file
3252          * - get/write the data size to pipe
3253          * - write the tracing data from the temp file
3254          *   to the pipe
3255          */
3256         tdata = tracing_data_get(&evlist->entries, fd, true);
3257         if (!tdata)
3258                 return -1;
3259
3260         memset(&ev, 0, sizeof(ev));
3261
3262         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3263         size = tdata->size;
3264         aligned_size = PERF_ALIGN(size, sizeof(u64));
3265         padding = aligned_size - size;
3266         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3267         ev.tracing_data.size = aligned_size;
3268
3269         process(tool, &ev, NULL, NULL);
3270
3271         /*
3272          * The put function will copy all the tracing data
3273          * stored in temp file to the pipe.
3274          */
3275         tracing_data_put(tdata);
3276
3277         write_padded(fd, NULL, 0, padding);
3278
3279         return aligned_size;
3280 }
3281
3282 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3283                                      union perf_event *event,
3284                                      struct perf_session *session)
3285 {
3286         ssize_t size_read, padding, size = event->tracing_data.size;
3287         int fd = perf_data_file__fd(session->file);
3288         off_t offset = lseek(fd, 0, SEEK_CUR);
3289         char buf[BUFSIZ];
3290
3291         /* setup for reading amidst mmap */
3292         lseek(fd, offset + sizeof(struct tracing_data_event),
3293               SEEK_SET);
3294
3295         size_read = trace_report(fd, &session->tevent,
3296                                  session->repipe);
3297         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3298
3299         if (readn(fd, buf, padding) < 0) {
3300                 pr_err("%s: reading input file", __func__);
3301                 return -1;
3302         }
3303         if (session->repipe) {
3304                 int retw = write(STDOUT_FILENO, buf, padding);
3305                 if (retw <= 0 || retw != padding) {
3306                         pr_err("%s: repiping tracing data padding", __func__);
3307                         return -1;
3308                 }
3309         }
3310
3311         if (size_read + padding != size) {
3312                 pr_err("%s: tracing data size mismatch", __func__);
3313                 return -1;
3314         }
3315
3316         perf_evlist__prepare_tracepoint_events(session->evlist,
3317                                                session->tevent.pevent);
3318
3319         return size_read + padding;
3320 }
3321
3322 int perf_event__synthesize_build_id(struct perf_tool *tool,
3323                                     struct dso *pos, u16 misc,
3324                                     perf_event__handler_t process,
3325                                     struct machine *machine)
3326 {
3327         union perf_event ev;
3328         size_t len;
3329         int err = 0;
3330
3331         if (!pos->hit)
3332                 return err;
3333
3334         memset(&ev, 0, sizeof(ev));
3335
3336         len = pos->long_name_len + 1;
3337         len = PERF_ALIGN(len, NAME_ALIGN);
3338         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3339         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3340         ev.build_id.header.misc = misc;
3341         ev.build_id.pid = machine->pid;
3342         ev.build_id.header.size = sizeof(ev.build_id) + len;
3343         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3344
3345         err = process(tool, &ev, NULL, machine);
3346
3347         return err;
3348 }
3349
3350 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3351                                  union perf_event *event,
3352                                  struct perf_session *session)
3353 {
3354         __event_process_build_id(&event->build_id,
3355                                  event->build_id.filename,
3356                                  session);
3357         return 0;
3358 }