1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/list.h>
12 #include <linux/kernel.h>
13 #include <linux/bitops.h>
14 #include <sys/stat.h>
15 #include <sys/types.h>
16 #include <sys/utsname.h>
17 #include <unistd.h>
18
19 #include "evlist.h"
20 #include "evsel.h"
21 #include "header.h"
22 #include "memswap.h"
23 #include "../perf.h"
24 #include "trace-event.h"
25 #include "session.h"
26 #include "symbol.h"
27 #include "debug.h"
28 #include "cpumap.h"
29 #include "pmu.h"
30 #include "vdso.h"
31 #include "strbuf.h"
32 #include "build-id.h"
33 #include "data.h"
34 #include <api/fs/fs.h>
35 #include "asm/bug.h"
36
37 #include "sane_ctype.h"
38
39 /*
40  * magic2 = "PERFILE2"
41  * must be a numerical value so that the endianness
42  * determines the memory layout. That way we are able
43  * to detect the endianness when reading the perf.data
44  * file back.
45  *
46  * We also check for the legacy (PERFFILE) format.
47  */
48 static const char *__perf_magic1 = "PERFFILE";
49 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
50 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
51
52 #define PERF_MAGIC      __perf_magic2
53
54 const char perf_version_string[] = PERF_VERSION;
55
56 struct perf_file_attr {
57         struct perf_event_attr  attr;
58         struct perf_file_section        ids;
59 };
60
61 void perf_header__set_feat(struct perf_header *header, int feat)
62 {
63         set_bit(feat, header->adds_features);
64 }
65
66 void perf_header__clear_feat(struct perf_header *header, int feat)
67 {
68         clear_bit(feat, header->adds_features);
69 }
70
71 bool perf_header__has_feat(const struct perf_header *header, int feat)
72 {
73         return test_bit(feat, header->adds_features);
74 }
75
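/*
 * Write 'size' bytes from 'buf' to 'fd', looping on short writes.
 * Returns 0 on success or -errno if a write fails.
 */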
76 static int do_write(int fd, const void *buf, size_t size)
77 {
78         while (size) {
79                 int ret = write(fd, buf, size);
80
81                 if (ret < 0)
82                         return -errno;
83
84                 size -= ret;
85                 buf += ret;
86         }
87
88         return 0;
89 }
90
91 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
92 {
93         static const char zero_buf[NAME_ALIGN];
94         int err = do_write(fd, bf, count);
95
96         if (!err)
97                 err = do_write(fd, zero_buf, count_aligned - count);
98
99         return err;
100 }
101
102 #define string_size(str)                                                \
103         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
104
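/*
 * Write a string as a u32 length (strlen + 1, rounded up to NAME_ALIGN)
 * followed by the string data padded with zeroes to that length.
 */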
105 static int do_write_string(int fd, const char *str)
106 {
107         u32 len, olen;
108         int ret;
109
110         olen = strlen(str) + 1;
111         len = PERF_ALIGN(olen, NAME_ALIGN);
112
113         /* write len, incl. \0 */
114         ret = do_write(fd, &len, sizeof(len));
115         if (ret < 0)
116                 return ret;
117
118         return write_padded(fd, str, olen, len);
119 }
120
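/*
 * Read a string written by do_write_string(): a u32 padded length
 * (byte-swapped if needed) followed by the padded string data.
 * Returns a malloc'd buffer, or NULL on error.
 */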
121 static char *do_read_string(int fd, struct perf_header *ph)
122 {
123         ssize_t sz, ret;
124         u32 len;
125         char *buf;
126
127         sz = readn(fd, &len, sizeof(len));
128         if (sz < (ssize_t)sizeof(len))
129                 return NULL;
130
131         if (ph->needs_swap)
132                 len = bswap_32(len);
133
134         buf = malloc(len);
135         if (!buf)
136                 return NULL;
137
138         ret = readn(fd, buf, len);
139         if (ret == (ssize_t)len) {
140                 /*
141                  * strings are padded with zeroes,
142                  * thus the actual strlen of buf
143                  * may be less than len
144                  */
145                 return buf;
146         }
147
148         free(buf);
149         return NULL;
150 }
151
152 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
153                             struct perf_evlist *evlist)
154 {
155         return read_tracing_data(fd, &evlist->entries);
156 }
157
158
159 static int write_build_id(int fd, struct perf_header *h,
160                           struct perf_evlist *evlist __maybe_unused)
161 {
162         struct perf_session *session;
163         int err;
164
165         session = container_of(h, struct perf_session, header);
166
167         if (!perf_session__read_build_ids(session, true))
168                 return -1;
169
170         err = perf_session__write_buildid_table(session, fd);
171         if (err < 0) {
172                 pr_debug("failed to write buildid table\n");
173                 return err;
174         }
175         perf_session__cache_build_ids(session);
176
177         return 0;
178 }
179
180 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
181                           struct perf_evlist *evlist __maybe_unused)
182 {
183         struct utsname uts;
184         int ret;
185
186         ret = uname(&uts);
187         if (ret < 0)
188                 return -1;
189
190         return do_write_string(fd, uts.nodename);
191 }
192
193 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
194                            struct perf_evlist *evlist __maybe_unused)
195 {
196         struct utsname uts;
197         int ret;
198
199         ret = uname(&uts);
200         if (ret < 0)
201                 return -1;
202
203         return do_write_string(fd, uts.release);
204 }
205
206 static int write_arch(int fd, struct perf_header *h __maybe_unused,
207                       struct perf_evlist *evlist __maybe_unused)
208 {
209         struct utsname uts;
210         int ret;
211
212         ret = uname(&uts);
213         if (ret < 0)
214                 return -1;
215
216         return do_write_string(fd, uts.machine);
217 }
218
219 static int write_version(int fd, struct perf_header *h __maybe_unused,
220                          struct perf_evlist *evlist __maybe_unused)
221 {
222         return do_write_string(fd, perf_version_string);
223 }
224
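/*
 * Find the first /proc/cpuinfo line starting with 'cpuinfo_proc' (e.g.
 * "model name"), squash repeated whitespace in its value and write it out.
 */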
225 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
226 {
227         FILE *file;
228         char *buf = NULL;
229         char *s, *p;
230         const char *search = cpuinfo_proc;
231         size_t len = 0;
232         int ret = -1;
233
234         if (!search)
235                 return -1;
236
237         file = fopen("/proc/cpuinfo", "r");
238         if (!file)
239                 return -1;
240
241         while (getline(&buf, &len, file) > 0) {
242                 ret = strncmp(buf, search, strlen(search));
243                 if (!ret)
244                         break;
245         }
246
247         if (ret) {
248                 ret = -1;
249                 goto done;
250         }
251
252         s = buf;
253
254         p = strchr(buf, ':');
255         if (p && *(p+1) == ' ' && *(p+2))
256                 s = p + 2;
257         p = strchr(s, '\n');
258         if (p)
259                 *p = '\0';
260
261         /* squash extra space characters (branding string) */
262         p = s;
263         while (*p) {
264                 if (isspace(*p)) {
265                         char *r = p + 1;
266                         char *q = r;
267                         *p = ' ';
268                         while (*q && isspace(*q))
269                                 q++;
270                         if (q != (p+1))
271                                 while ((*r++ = *q++));
272                 }
273                 p++;
274         }
275         ret = do_write_string(fd, s);
276 done:
277         free(buf);
278         fclose(file);
279         return ret;
280 }
281
282 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
283                        struct perf_evlist *evlist __maybe_unused)
284 {
285 #ifndef CPUINFO_PROC
286 #define CPUINFO_PROC {"model name", }
287 #endif
288         const char *cpuinfo_procs[] = CPUINFO_PROC;
289         unsigned int i;
290
291         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
292                 int ret;
293                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
294                 if (ret >= 0)
295                         return ret;
296         }
297         return -1;
298 }
299
300
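/* Write the count of present CPUs followed by the count of online CPUs. */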
301 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
302                         struct perf_evlist *evlist __maybe_unused)
303 {
304         long nr;
305         u32 nrc, nra;
306         int ret;
307
308         nrc = cpu__max_present_cpu();
309
310         nr = sysconf(_SC_NPROCESSORS_ONLN);
311         if (nr < 0)
312                 return -1;
313
314         nra = (u32)(nr & UINT_MAX);
315
316         ret = do_write(fd, &nrc, sizeof(nrc));
317         if (ret < 0)
318                 return ret;
319
320         return do_write(fd, &nra, sizeof(nra));
321 }
322
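/*
 * File format:
 *
 *      u32     nr_events;
 *      u32     attr_size;
 *      struct {
 *              struct perf_event_attr  attr;
 *              u32     nr_ids;
 *              char    name[];
 *              u64     ids[nr_ids];
 *      }[nr_events];
 */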
323 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
324                             struct perf_evlist *evlist)
325 {
326         struct perf_evsel *evsel;
327         u32 nre, nri, sz;
328         int ret;
329
330         nre = evlist->nr_entries;
331
332         /*
333          * write number of events
334          */
335         ret = do_write(fd, &nre, sizeof(nre));
336         if (ret < 0)
337                 return ret;
338
339         /*
340          * size of perf_event_attr struct
341          */
342         sz = (u32)sizeof(evsel->attr);
343         ret = do_write(fd, &sz, sizeof(sz));
344         if (ret < 0)
345                 return ret;
346
347         evlist__for_each_entry(evlist, evsel) {
348                 ret = do_write(fd, &evsel->attr, sz);
349                 if (ret < 0)
350                         return ret;
351                 /*
352                  * write the number of unique ids per event;
353                  * there is one id per instance of an event
354                  *
355                  * copy into nri to be independent of the
356                  * type of ids
357                  */
358                 nri = evsel->ids;
359                 ret = do_write(fd, &nri, sizeof(nri));
360                 if (ret < 0)
361                         return ret;
362
363                 /*
364                  * write event string as passed on cmdline
365                  */
366                 ret = do_write_string(fd, perf_evsel__name(evsel));
367                 if (ret < 0)
368                         return ret;
369                 /*
370                  * write unique ids for this event
371                  */
372                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
373                 if (ret < 0)
374                         return ret;
375         }
376         return 0;
377 }
378
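/*
 * Write the command line: a u32 count (the recorded arguments plus the
 * resolved path of the running perf binary), then each entry as a string.
 */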
379 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
380                          struct perf_evlist *evlist __maybe_unused)
381 {
382         char buf[MAXPATHLEN];
383         u32 n;
384         int i, ret;
385
386         /* actual path to perf binary */
387         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
388         if (ret <= 0)
389                 return -1;
390
391         /* readlink() does not add null termination */
392         buf[ret] = '\0';
393
394         /* account for binary path */
395         n = perf_env.nr_cmdline + 1;
396
397         ret = do_write(fd, &n, sizeof(n));
398         if (ret < 0)
399                 return ret;
400
401         ret = do_write_string(fd, buf);
402         if (ret < 0)
403                 return ret;
404
405         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
406                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
407                 if (ret < 0)
408                         return ret;
409         }
410         return 0;
411 }
412
413 #define CORE_SIB_FMT \
414         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
415 #define THRD_SIB_FMT \
416         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
417
418 struct cpu_topo {
419         u32 cpu_nr;
420         u32 core_sib;
421         u32 thread_sib;
422         char **core_siblings;
423         char **thread_siblings;
424 };
425
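/*
 * Read the core and thread sibling lists of 'cpu' from sysfs and record
 * each list in 'tp' if it has not already been seen on another CPU.
 */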
426 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
427 {
428         FILE *fp;
429         char filename[MAXPATHLEN];
430         char *buf = NULL, *p;
431         size_t len = 0;
432         ssize_t sret;
433         u32 i = 0;
434         int ret = -1;
435
436         sprintf(filename, CORE_SIB_FMT, cpu);
437         fp = fopen(filename, "r");
438         if (!fp)
439                 goto try_threads;
440
441         sret = getline(&buf, &len, fp);
442         fclose(fp);
443         if (sret <= 0)
444                 goto try_threads;
445
446         p = strchr(buf, '\n');
447         if (p)
448                 *p = '\0';
449
450         for (i = 0; i < tp->core_sib; i++) {
451                 if (!strcmp(buf, tp->core_siblings[i]))
452                         break;
453         }
454         if (i == tp->core_sib) {
455                 tp->core_siblings[i] = buf;
456                 tp->core_sib++;
457                 buf = NULL;
458                 len = 0;
459         }
460         ret = 0;
461
462 try_threads:
463         sprintf(filename, THRD_SIB_FMT, cpu);
464         fp = fopen(filename, "r");
465         if (!fp)
466                 goto done;
467
468         if (getline(&buf, &len, fp) <= 0)
469                 goto done;
470
471         p = strchr(buf, '\n');
472         if (p)
473                 *p = '\0';
474
475         for (i = 0; i < tp->thread_sib; i++) {
476                 if (!strcmp(buf, tp->thread_siblings[i]))
477                         break;
478         }
479         if (i == tp->thread_sib) {
480                 tp->thread_siblings[i] = buf;
481                 tp->thread_sib++;
482                 buf = NULL;
483         }
484         ret = 0;
485 done:
486         if (fp)
487                 fclose(fp);
488         free(buf);
489         return ret;
490 }
491
492 static void free_cpu_topo(struct cpu_topo *tp)
493 {
494         u32 i;
495
496         if (!tp)
497                 return;
498
499         for (i = 0 ; i < tp->core_sib; i++)
500                 zfree(&tp->core_siblings[i]);
501
502         for (i = 0 ; i < tp->thread_sib; i++)
503                 zfree(&tp->thread_siblings[i]);
504
505         free(tp);
506 }
507
508 static struct cpu_topo *build_cpu_topology(void)
509 {
510         struct cpu_topo *tp = NULL;
511         void *addr;
512         u32 nr, i;
513         size_t sz;
514         long ncpus;
515         int ret = -1;
516         struct cpu_map *map;
517
518         ncpus = cpu__max_present_cpu();
519
520         /* build online CPU map */
521         map = cpu_map__new(NULL);
522         if (map == NULL) {
523                 pr_debug("failed to get system cpumap\n");
524                 return NULL;
525         }
526
527         nr = (u32)(ncpus & UINT_MAX);
528
529         sz = nr * sizeof(char *);
530         addr = calloc(1, sizeof(*tp) + 2 * sz);
531         if (!addr)
532                 goto out_free;
533
534         tp = addr;
535         tp->cpu_nr = nr;
536         addr += sizeof(*tp);
537         tp->core_siblings = addr;
538         addr += sz;
539         tp->thread_siblings = addr;
540
541         for (i = 0; i < nr; i++) {
542                 if (!cpu_map__has(map, i))
543                         continue;
544
545                 ret = build_cpu_topo(tp, i);
546                 if (ret < 0)
547                         break;
548         }
549
550 out_free:
551         cpu_map__put(map);
552         if (ret) {
553                 free_cpu_topo(tp);
554                 tp = NULL;
555         }
556         return tp;
557 }
558
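/*
 * Write the de-duplicated core sibling and thread sibling lists built by
 * build_cpu_topology(), each as a u32 count followed by that many strings,
 * and then the core id and socket id of each available CPU.
 */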
559 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
560                           struct perf_evlist *evlist __maybe_unused)
561 {
562         struct cpu_topo *tp;
563         u32 i;
564         int ret, j;
565
566         tp = build_cpu_topology();
567         if (!tp)
568                 return -1;
569
570         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
571         if (ret < 0)
572                 goto done;
573
574         for (i = 0; i < tp->core_sib; i++) {
575                 ret = do_write_string(fd, tp->core_siblings[i]);
576                 if (ret < 0)
577                         goto done;
578         }
579         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
580         if (ret < 0)
581                 goto done;
582
583         for (i = 0; i < tp->thread_sib; i++) {
584                 ret = do_write_string(fd, tp->thread_siblings[i]);
585                 if (ret < 0)
586                         break;
587         }
588
589         ret = perf_env__read_cpu_topology_map(&perf_env);
590         if (ret < 0)
591                 goto done;
592
593         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
594                 ret = do_write(fd, &perf_env.cpu[j].core_id,
595                                sizeof(perf_env.cpu[j].core_id));
596                 if (ret < 0)
597                         return ret;
598                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
599                                sizeof(perf_env.cpu[j].socket_id));
600                 if (ret < 0)
601                         return ret;
602         }
603 done:
604         free_cpu_topo(tp);
605         return ret;
606 }
607
608
609
610 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
611                           struct perf_evlist *evlist __maybe_unused)
612 {
613         char *buf = NULL;
614         FILE *fp;
615         size_t len = 0;
616         int ret = -1, n;
617         uint64_t mem;
618
619         fp = fopen("/proc/meminfo", "r");
620         if (!fp)
621                 return -1;
622
623         while (getline(&buf, &len, fp) > 0) {
624                 ret = strncmp(buf, "MemTotal:", 9);
625                 if (!ret)
626                         break;
627         }
628         if (!ret) {
629                 n = sscanf(buf, "%*s %"PRIu64, &mem);
630                 if (n == 1)
631                         ret = do_write(fd, &mem, sizeof(mem));
632         } else
633                 ret = -1;
634         free(buf);
635         fclose(fp);
636         return ret;
637 }
638
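/*
 * For a single NUMA node, write its MemTotal and MemFree values (u64)
 * parsed from the node's meminfo, followed by its cpulist string.
 */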
639 static int write_topo_node(int fd, int node)
640 {
641         char str[MAXPATHLEN];
642         char field[32];
643         char *buf = NULL, *p;
644         size_t len = 0;
645         FILE *fp;
646         u64 mem_total, mem_free, mem;
647         int ret = -1;
648
649         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
650         fp = fopen(str, "r");
651         if (!fp)
652                 return -1;
653
654         while (getline(&buf, &len, fp) > 0) {
655                 /* skip over invalid lines */
656                 if (!strchr(buf, ':'))
657                         continue;
658                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
659                         goto done;
660                 if (!strcmp(field, "MemTotal:"))
661                         mem_total = mem;
662                 if (!strcmp(field, "MemFree:"))
663                         mem_free = mem;
664         }
665
666         fclose(fp);
667         fp = NULL;
668
669         ret = do_write(fd, &mem_total, sizeof(u64));
670         if (ret)
671                 goto done;
672
673         ret = do_write(fd, &mem_free, sizeof(u64));
674         if (ret)
675                 goto done;
676
677         ret = -1;
678         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
679
680         fp = fopen(str, "r");
681         if (!fp)
682                 goto done;
683
684         if (getline(&buf, &len, fp) <= 0)
685                 goto done;
686
687         p = strchr(buf, '\n');
688         if (p)
689                 *p = '\0';
690
691         ret = do_write_string(fd, buf);
692 done:
693         free(buf);
694         if (fp)
695                 fclose(fp);
696         return ret;
697 }
698
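/*
 * Write the list of online NUMA nodes: a u32 node count, then for each
 * node its number (u32) followed by the data from write_topo_node().
 */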
699 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
700                           struct perf_evlist *evlist __maybe_unused)
701 {
702         char *buf = NULL;
703         size_t len = 0;
704         FILE *fp;
705         struct cpu_map *node_map = NULL;
706         char *c;
707         u32 nr, i, j;
708         int ret = -1;
709
710         fp = fopen("/sys/devices/system/node/online", "r");
711         if (!fp)
712                 return -1;
713
714         if (getline(&buf, &len, fp) <= 0)
715                 goto done;
716
717         c = strchr(buf, '\n');
718         if (c)
719                 *c = '\0';
720
721         node_map = cpu_map__new(buf);
722         if (!node_map)
723                 goto done;
724
725         nr = (u32)node_map->nr;
726
727         ret = do_write(fd, &nr, sizeof(nr));
728         if (ret < 0)
729                 goto done;
730
731         for (i = 0; i < nr; i++) {
732                 j = (u32)node_map->map[i];
733                 ret = do_write(fd, &j, sizeof(j));
734                 if (ret < 0)
735                         break;
736
737                 ret = write_topo_node(fd, i);
738                 if (ret < 0)
739                         break;
740         }
741 done:
742         free(buf);
743         fclose(fp);
744         cpu_map__put(node_map);
745         return ret;
746 }
747
748 /*
749  * File format:
750  *
751  * struct pmu_mappings {
752  *      u32     pmu_num;
753  *      struct pmu_map {
754  *              u32     type;
755  *              char    name[];
756  *      }[pmu_num];
757  * };
758  */
759
760 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
761                               struct perf_evlist *evlist __maybe_unused)
762 {
763         struct perf_pmu *pmu = NULL;
764         off_t offset = lseek(fd, 0, SEEK_CUR);
765         __u32 pmu_num = 0;
766         int ret;
767
768         /* write real pmu_num later */
769         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
770         if (ret < 0)
771                 return ret;
772
773         while ((pmu = perf_pmu__scan(pmu))) {
774                 if (!pmu->name)
775                         continue;
776                 pmu_num++;
777
778                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
779                 if (ret < 0)
780                         return ret;
781
782                 ret = do_write_string(fd, pmu->name);
783                 if (ret < 0)
784                         return ret;
785         }
786
787         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
788                 /* discard all */
789                 lseek(fd, offset, SEEK_SET);
790                 return -1;
791         }
792
793         return 0;
794 }
795
796 /*
797  * File format:
798  *
799  * struct group_descs {
800  *      u32     nr_groups;
801  *      struct group_desc {
802  *              char    name[];
803  *              u32     leader_idx;
804  *              u32     nr_members;
805  *      }[nr_groups];
806  * };
807  */
808 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
809                             struct perf_evlist *evlist)
810 {
811         u32 nr_groups = evlist->nr_groups;
812         struct perf_evsel *evsel;
813         int ret;
814
815         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
816         if (ret < 0)
817                 return ret;
818
819         evlist__for_each_entry(evlist, evsel) {
820                 if (perf_evsel__is_group_leader(evsel) &&
821                     evsel->nr_members > 1) {
822                         const char *name = evsel->group_name ?: "{anon_group}";
823                         u32 leader_idx = evsel->idx;
824                         u32 nr_members = evsel->nr_members;
825
826                         ret = do_write_string(fd, name);
827                         if (ret < 0)
828                                 return ret;
829
830                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
831                         if (ret < 0)
832                                 return ret;
833
834                         ret = do_write(fd, &nr_members, sizeof(nr_members));
835                         if (ret < 0)
836                                 return ret;
837                 }
838         }
839         return 0;
840 }
841
842 /*
843  * default get_cpuid(): nothing gets recorded
844  * actual implementation must be in arch/$(ARCH)/util/header.c
845  */
846 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
847 {
848         return -1;
849 }
850
851 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
852                        struct perf_evlist *evlist __maybe_unused)
853 {
854         char buffer[64];
855         int ret;
856
857         ret = get_cpuid(buffer, sizeof(buffer));
858         if (!ret)
859                 goto write_it;
860
861         return -1;
862 write_it:
863         return do_write_string(fd, buffer);
864 }
865
866 static int write_branch_stack(int fd __maybe_unused,
867                               struct perf_header *h __maybe_unused,
868                        struct perf_evlist *evlist __maybe_unused)
869 {
870         return 0;
871 }
872
873 static int write_auxtrace(int fd, struct perf_header *h,
874                           struct perf_evlist *evlist __maybe_unused)
875 {
876         struct perf_session *session;
877         int err;
878
879         session = container_of(h, struct perf_session, header);
880
881         err = auxtrace_index__write(fd, &session->auxtrace_index);
882         if (err < 0)
883                 pr_err("Failed to write auxtrace index\n");
884         return err;
885 }
886
887 static int cpu_cache_level__sort(const void *a, const void *b)
888 {
889         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
890         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
891
892         return cache_a->level - cache_b->level;
893 }
894
895 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
896 {
897         if (a->level != b->level)
898                 return false;
899
900         if (a->line_size != b->line_size)
901                 return false;
902
903         if (a->sets != b->sets)
904                 return false;
905
906         if (a->ways != b->ways)
907                 return false;
908
909         if (strcmp(a->type, b->type))
910                 return false;
911
912         if (strcmp(a->size, b->size))
913                 return false;
914
915         if (strcmp(a->map, b->map))
916                 return false;
917
918         return true;
919 }
920
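/*
 * Fill 'cache' from sysfs for the given cpu and cache index. Returns 0 on
 * success, 1 if the cache index does not exist and -1 on a read failure.
 */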
921 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
922 {
923         char path[PATH_MAX], file[PATH_MAX];
924         struct stat st;
925         size_t len;
926
927         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
928         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
929
930         if (stat(file, &st))
931                 return 1;
932
933         scnprintf(file, PATH_MAX, "%s/level", path);
934         if (sysfs__read_int(file, (int *) &cache->level))
935                 return -1;
936
937         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
938         if (sysfs__read_int(file, (int *) &cache->line_size))
939                 return -1;
940
941         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
942         if (sysfs__read_int(file, (int *) &cache->sets))
943                 return -1;
944
945         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
946         if (sysfs__read_int(file, (int *) &cache->ways))
947                 return -1;
948
949         scnprintf(file, PATH_MAX, "%s/type", path);
950         if (sysfs__read_str(file, &cache->type, &len))
951                 return -1;
952
953         cache->type[len] = 0;
954         cache->type = rtrim(cache->type);
955
956         scnprintf(file, PATH_MAX, "%s/size", path);
957         if (sysfs__read_str(file, &cache->size, &len)) {
958                 free(cache->type);
959                 return -1;
960         }
961
962         cache->size[len] = 0;
963         cache->size = rtrim(cache->size);
964
965         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
966         if (sysfs__read_str(file, &cache->map, &len)) {
967                 free(cache->size);
968                 free(cache->type);
969                 return -1;
970         }
971
972         cache->map[len] = 0;
973         cache->map = rtrim(cache->map);
974         return 0;
975 }
976
977 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
978 {
979         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
980 }
981
982 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
983 {
984         u32 i, cnt = 0;
985         long ncpus;
986         u32 nr, cpu;
987         u16 level;
988
989         ncpus = sysconf(_SC_NPROCESSORS_CONF);
990         if (ncpus < 0)
991                 return -1;
992
993         nr = (u32)(ncpus & UINT_MAX);
994
995         for (cpu = 0; cpu < nr; cpu++) {
996                 for (level = 0; level < 10; level++) {
997                         struct cpu_cache_level c;
998                         int err;
999
1000                         err = cpu_cache_level__read(&c, cpu, level);
1001                         if (err < 0)
1002                                 return err;
1003
1004                         if (err == 1)
1005                                 break;
1006
1007                         for (i = 0; i < cnt; i++) {
1008                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1009                                         break;
1010                         }
1011
1012                         if (i == cnt)
1013                                 caches[cnt++] = c;
1014                         else
1015                                 cpu_cache_level__free(&c);
1016
1017                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1018                                 goto out;
1019                 }
1020         }
1021  out:
1022         *cntp = cnt;
1023         return 0;
1024 }
1025
1026 #define MAX_CACHES 2000
1027
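/*
 * File format:
 *
 *      u32     version;
 *      u32     cnt;
 *      struct {
 *              u32     level;
 *              u32     line_size;
 *              u32     sets;
 *              u32     ways;
 *              char    type[];
 *              char    size[];
 *              char    map[];
 *      }[cnt];
 */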
1028 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1029                           struct perf_evlist *evlist __maybe_unused)
1030 {
1031         struct cpu_cache_level caches[MAX_CACHES];
1032         u32 cnt = 0, i, version = 1;
1033         int ret;
1034
1035         ret = build_caches(caches, MAX_CACHES, &cnt);
1036         if (ret)
1037                 goto out;
1038
1039         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1040
1041         ret = do_write(fd, &version, sizeof(u32));
1042         if (ret < 0)
1043                 goto out;
1044
1045         ret = do_write(fd, &cnt, sizeof(u32));
1046         if (ret < 0)
1047                 goto out;
1048
1049         for (i = 0; i < cnt; i++) {
1050                 struct cpu_cache_level *c = &caches[i];
1051
1052                 #define _W(v)                                   \
1053                         ret = do_write(fd, &c->v, sizeof(u32)); \
1054                         if (ret < 0)                            \
1055                                 goto out;
1056
1057                 _W(level)
1058                 _W(line_size)
1059                 _W(sets)
1060                 _W(ways)
1061                 #undef _W
1062
1063                 #define _W(v)                                           \
1064                         ret = do_write_string(fd, (const char *) c->v); \
1065                         if (ret < 0)                                    \
1066                                 goto out;
1067
1068                 _W(type)
1069                 _W(size)
1070                 _W(map)
1071                 #undef _W
1072         }
1073
1074 out:
1075         for (i = 0; i < cnt; i++)
1076                 cpu_cache_level__free(&caches[i]);
1077         return ret;
1078 }
1079
1080 static int write_stat(int fd __maybe_unused,
1081                       struct perf_header *h __maybe_unused,
1082                       struct perf_evlist *evlist __maybe_unused)
1083 {
1084         return 0;
1085 }
1086
1087 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1088                            FILE *fp)
1089 {
1090         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1091 }
1092
1093 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1094                             FILE *fp)
1095 {
1096         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1097 }
1098
1099 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1100 {
1101         fprintf(fp, "# arch : %s\n", ph->env.arch);
1102 }
1103
1104 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1105                           FILE *fp)
1106 {
1107         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1108 }
1109
1110 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1111                          FILE *fp)
1112 {
1113         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1114         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1115 }
1116
1117 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1118                           FILE *fp)
1119 {
1120         fprintf(fp, "# perf version : %s\n", ph->env.version);
1121 }
1122
1123 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1124                           FILE *fp)
1125 {
1126         int nr, i;
1127
1128         nr = ph->env.nr_cmdline;
1129
1130         fprintf(fp, "# cmdline : ");
1131
1132         for (i = 0; i < nr; i++)
1133                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1134         fputc('\n', fp);
1135 }
1136
1137 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1138                                FILE *fp)
1139 {
1140         int nr, i;
1141         char *str;
1142         int cpu_nr = ph->env.nr_cpus_avail;
1143
1144         nr = ph->env.nr_sibling_cores;
1145         str = ph->env.sibling_cores;
1146
1147         for (i = 0; i < nr; i++) {
1148                 fprintf(fp, "# sibling cores   : %s\n", str);
1149                 str += strlen(str) + 1;
1150         }
1151
1152         nr = ph->env.nr_sibling_threads;
1153         str = ph->env.sibling_threads;
1154
1155         for (i = 0; i < nr; i++) {
1156                 fprintf(fp, "# sibling threads : %s\n", str);
1157                 str += strlen(str) + 1;
1158         }
1159
1160         if (ph->env.cpu != NULL) {
1161                 for (i = 0; i < cpu_nr; i++)
1162                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1163                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1164         } else
1165                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1166 }
1167
1168 static void free_event_desc(struct perf_evsel *events)
1169 {
1170         struct perf_evsel *evsel;
1171
1172         if (!events)
1173                 return;
1174
1175         for (evsel = events; evsel->attr.size; evsel++) {
1176                 zfree(&evsel->name);
1177                 zfree(&evsel->id);
1178         }
1179
1180         free(events);
1181 }
1182
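/*
 * Read back the table written by write_event_desc() into an array of
 * struct perf_evsel terminated by an entry with attr.size == 0.
 */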
1183 static struct perf_evsel *
1184 read_event_desc(struct perf_header *ph, int fd)
1185 {
1186         struct perf_evsel *evsel, *events = NULL;
1187         u64 *id;
1188         void *buf = NULL;
1189         u32 nre, sz, nr, i, j;
1190         ssize_t ret;
1191         size_t msz;
1192
1193         /* number of events */
1194         ret = readn(fd, &nre, sizeof(nre));
1195         if (ret != (ssize_t)sizeof(nre))
1196                 goto error;
1197
1198         if (ph->needs_swap)
1199                 nre = bswap_32(nre);
1200
1201         ret = readn(fd, &sz, sizeof(sz));
1202         if (ret != (ssize_t)sizeof(sz))
1203                 goto error;
1204
1205         if (ph->needs_swap)
1206                 sz = bswap_32(sz);
1207
1208         /* buffer to hold the on-file attr struct */
1209         buf = malloc(sz);
1210         if (!buf)
1211                 goto error;
1212
1213         /* the last event terminates with evsel->attr.size == 0: */
1214         events = calloc(nre + 1, sizeof(*events));
1215         if (!events)
1216                 goto error;
1217
1218         msz = sizeof(evsel->attr);
1219         if (sz < msz)
1220                 msz = sz;
1221
1222         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1223                 evsel->idx = i;
1224
1225                 /*
1226                  * must read entire on-file attr struct to
1227                  * sync up with layout.
1228                  */
1229                 ret = readn(fd, buf, sz);
1230                 if (ret != (ssize_t)sz)
1231                         goto error;
1232
1233                 if (ph->needs_swap)
1234                         perf_event__attr_swap(buf);
1235
1236                 memcpy(&evsel->attr, buf, msz);
1237
1238                 ret = readn(fd, &nr, sizeof(nr));
1239                 if (ret != (ssize_t)sizeof(nr))
1240                         goto error;
1241
1242                 if (ph->needs_swap) {
1243                         nr = bswap_32(nr);
1244                         evsel->needs_swap = true;
1245                 }
1246
1247                 evsel->name = do_read_string(fd, ph);
1248
1249                 if (!nr)
1250                         continue;
1251
1252                 id = calloc(nr, sizeof(*id));
1253                 if (!id)
1254                         goto error;
1255                 evsel->ids = nr;
1256                 evsel->id = id;
1257
1258                 for (j = 0 ; j < nr; j++) {
1259                         ret = readn(fd, id, sizeof(*id));
1260                         if (ret != (ssize_t)sizeof(*id))
1261                                 goto error;
1262                         if (ph->needs_swap)
1263                                 *id = bswap_64(*id);
1264                         id++;
1265                 }
1266         }
1267 out:
1268         free(buf);
1269         return events;
1270 error:
1271         free_event_desc(events);
1272         events = NULL;
1273         goto out;
1274 }
1275
1276 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1277                                 void *priv __attribute__((unused)))
1278 {
1279         return fprintf(fp, ", %s = %s", name, val);
1280 }
1281
1282 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1283 {
1284         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1285         u32 j;
1286         u64 *id;
1287
1288         if (!events) {
1289                 fprintf(fp, "# event desc: not available or unable to read\n");
1290                 return;
1291         }
1292
1293         for (evsel = events; evsel->attr.size; evsel++) {
1294                 fprintf(fp, "# event : name = %s, ", evsel->name);
1295
1296                 if (evsel->ids) {
1297                         fprintf(fp, ", id = {");
1298                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1299                                 if (j)
1300                                         fputc(',', fp);
1301                                 fprintf(fp, " %"PRIu64, *id);
1302                         }
1303                         fprintf(fp, " }");
1304                 }
1305
1306                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1307
1308                 fputc('\n', fp);
1309         }
1310
1311         free_event_desc(events);
1312 }
1313
1314 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1315                             FILE *fp)
1316 {
1317         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1318 }
1319
1320 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1321                                 FILE *fp)
1322 {
1323         int i;
1324         struct numa_node *n;
1325
1326         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1327                 n = &ph->env.numa_nodes[i];
1328
1329                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1330                             " free = %"PRIu64" kB\n",
1331                         n->node, n->mem_total, n->mem_free);
1332
1333                 fprintf(fp, "# node%u cpu list : ", n->node);
1334                 cpu_map__fprintf(n->map, fp);
1335         }
1336 }
1337
1338 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1339 {
1340         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1341 }
1342
1343 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1344                                int fd __maybe_unused, FILE *fp)
1345 {
1346         fprintf(fp, "# contains samples with branch stack\n");
1347 }
1348
1349 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1350                            int fd __maybe_unused, FILE *fp)
1351 {
1352         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1353 }
1354
1355 static void print_stat(struct perf_header *ph __maybe_unused,
1356                        int fd __maybe_unused, FILE *fp)
1357 {
1358         fprintf(fp, "# contains stat data\n");
1359 }
1360
1361 static void print_cache(struct perf_header *ph, int fd __maybe_unused,
1362                         FILE *fp)
1363 {
1364         int i;
1365
1366         fprintf(fp, "# CPU cache info:\n");
1367         for (i = 0; i < ph->env.caches_cnt; i++) {
1368                 fprintf(fp, "#  ");
1369                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1370         }
1371 }
1372
1373 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1374                                FILE *fp)
1375 {
1376         const char *delimiter = "# pmu mappings: ";
1377         char *str, *tmp;
1378         u32 pmu_num;
1379         u32 type;
1380
1381         pmu_num = ph->env.nr_pmu_mappings;
1382         if (!pmu_num) {
1383                 fprintf(fp, "# pmu mappings: not available\n");
1384                 return;
1385         }
1386
1387         str = ph->env.pmu_mappings;
1388
1389         while (pmu_num) {
1390                 type = strtoul(str, &tmp, 0);
1391                 if (*tmp != ':')
1392                         goto error;
1393
1394                 str = tmp + 1;
1395                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1396
1397                 delimiter = ", ";
1398                 str += strlen(str) + 1;
1399                 pmu_num--;
1400         }
1401
1402         fprintf(fp, "\n");
1403
1404         if (!pmu_num)
1405                 return;
1406 error:
1407         fprintf(fp, "# pmu mappings: unable to read\n");
1408 }
1409
1410 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1411                              FILE *fp)
1412 {
1413         struct perf_session *session;
1414         struct perf_evsel *evsel;
1415         u32 nr = 0;
1416
1417         session = container_of(ph, struct perf_session, header);
1418
1419         evlist__for_each_entry(session->evlist, evsel) {
1420                 if (perf_evsel__is_group_leader(evsel) &&
1421                     evsel->nr_members > 1) {
1422                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1423                                 perf_evsel__name(evsel));
1424
1425                         nr = evsel->nr_members - 1;
1426                 } else if (nr) {
1427                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1428
1429                         if (--nr == 0)
1430                                 fprintf(fp, "}\n");
1431                 }
1432         }
1433 }
1434
1435 static int __event_process_build_id(struct build_id_event *bev,
1436                                     char *filename,
1437                                     struct perf_session *session)
1438 {
1439         int err = -1;
1440         struct machine *machine;
1441         u16 cpumode;
1442         struct dso *dso;
1443         enum dso_kernel_type dso_type;
1444
1445         machine = perf_session__findnew_machine(session, bev->pid);
1446         if (!machine)
1447                 goto out;
1448
1449         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1450
1451         switch (cpumode) {
1452         case PERF_RECORD_MISC_KERNEL:
1453                 dso_type = DSO_TYPE_KERNEL;
1454                 break;
1455         case PERF_RECORD_MISC_GUEST_KERNEL:
1456                 dso_type = DSO_TYPE_GUEST_KERNEL;
1457                 break;
1458         case PERF_RECORD_MISC_USER:
1459         case PERF_RECORD_MISC_GUEST_USER:
1460                 dso_type = DSO_TYPE_USER;
1461                 break;
1462         default:
1463                 goto out;
1464         }
1465
1466         dso = machine__findnew_dso(machine, filename);
1467         if (dso != NULL) {
1468                 char sbuild_id[SBUILD_ID_SIZE];
1469
1470                 dso__set_build_id(dso, &bev->build_id);
1471
1472                 if (!is_kernel_module(filename, cpumode))
1473                         dso->kernel = dso_type;
1474
1475                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1476                                   sbuild_id);
1477                 pr_debug("build id event received for %s: %s\n",
1478                          dso->long_name, sbuild_id);
1479                 dso__put(dso);
1480         }
1481
1482         err = 0;
1483 out:
1484         return err;
1485 }
1486
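/*
 * Read a build-id table written before the a1645ce1 changeset added the
 * pid field to struct build_id_event; see the comment in
 * perf_header__read_build_ids() below.
 */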
1487 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1488                                                  int input, u64 offset, u64 size)
1489 {
1490         struct perf_session *session = container_of(header, struct perf_session, header);
1491         struct {
1492                 struct perf_event_header   header;
1493                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1494                 char                       filename[0];
1495         } old_bev;
1496         struct build_id_event bev;
1497         char filename[PATH_MAX];
1498         u64 limit = offset + size;
1499
1500         while (offset < limit) {
1501                 ssize_t len;
1502
1503                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1504                         return -1;
1505
1506                 if (header->needs_swap)
1507                         perf_event_header__bswap(&old_bev.header);
1508
1509                 len = old_bev.header.size - sizeof(old_bev);
1510                 if (readn(input, filename, len) != len)
1511                         return -1;
1512
1513                 bev.header = old_bev.header;
1514
1515                 /*
1516                  * As the pid is the missing value, we need to fill
1517                  * it properly. The header.misc value gives us a nice hint.
1518                  */
1519                 bev.pid = HOST_KERNEL_ID;
1520                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1521                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1522                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1523
1524                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1525                 __event_process_build_id(&bev, filename, session);
1526
1527                 offset += bev.header.size;
1528         }
1529
1530         return 0;
1531 }
1532
1533 static int perf_header__read_build_ids(struct perf_header *header,
1534                                        int input, u64 offset, u64 size)
1535 {
1536         struct perf_session *session = container_of(header, struct perf_session, header);
1537         struct build_id_event bev;
1538         char filename[PATH_MAX];
1539         u64 limit = offset + size, orig_offset = offset;
1540         int err = -1;
1541
1542         while (offset < limit) {
1543                 ssize_t len;
1544
1545                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1546                         goto out;
1547
1548                 if (header->needs_swap)
1549                         perf_event_header__bswap(&bev.header);
1550
1551                 len = bev.header.size - sizeof(bev);
1552                 if (readn(input, filename, len) != len)
1553                         goto out;
1554                 /*
1555                  * The a1645ce1 changeset:
1556                  *
1557                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1558                  *
1559                  * Added a field to struct build_id_event that broke the file
1560                  * format.
1561                  *
1562                  * Since the kernel build-id is the first entry, process the
1563                  * table using the old format if the well known
1564                  * '[kernel.kallsyms]' string for the kernel build-id has the
1565                  * first 4 characters chopped off (where the pid_t sits).
1566                  */
1567                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1568                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1569                                 return -1;
1570                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1571                 }
1572
1573                 __event_process_build_id(&bev, filename, session);
1574
1575                 offset += bev.header.size;
1576         }
1577         err = 0;
1578 out:
1579         return err;
1580 }
1581
1582 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1583                                 struct perf_header *ph __maybe_unused,
1584                                 int fd, void *data)
1585 {
1586         ssize_t ret = trace_report(fd, data, false);
1587         return ret < 0 ? -1 : 0;
1588 }
1589
1590 static int process_build_id(struct perf_file_section *section,
1591                             struct perf_header *ph, int fd,
1592                             void *data __maybe_unused)
1593 {
1594         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1595                 pr_debug("Failed to read buildids, continuing...\n");
1596         return 0;
1597 }
1598
1599 static int process_hostname(struct perf_file_section *section __maybe_unused,
1600                             struct perf_header *ph, int fd,
1601                             void *data __maybe_unused)
1602 {
1603         ph->env.hostname = do_read_string(fd, ph);
1604         return ph->env.hostname ? 0 : -ENOMEM;
1605 }
1606
1607 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1608                              struct perf_header *ph, int fd,
1609                              void *data __maybe_unused)
1610 {
1611         ph->env.os_release = do_read_string(fd, ph);
1612         return ph->env.os_release ? 0 : -ENOMEM;
1613 }
1614
1615 static int process_version(struct perf_file_section *section __maybe_unused,
1616                            struct perf_header *ph, int fd,
1617                            void *data __maybe_unused)
1618 {
1619         ph->env.version = do_read_string(fd, ph);
1620         return ph->env.version ? 0 : -ENOMEM;
1621 }
1622
1623 static int process_arch(struct perf_file_section *section __maybe_unused,
1624                         struct perf_header *ph, int fd,
1625                         void *data __maybe_unused)
1626 {
1627         ph->env.arch = do_read_string(fd, ph);
1628         return ph->env.arch ? 0 : -ENOMEM;
1629 }
1630
1631 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1632                           struct perf_header *ph, int fd,
1633                           void *data __maybe_unused)
1634 {
1635         ssize_t ret;
1636         u32 nr;
1637
1638         ret = readn(fd, &nr, sizeof(nr));
1639         if (ret != sizeof(nr))
1640                 return -1;
1641
1642         if (ph->needs_swap)
1643                 nr = bswap_32(nr);
1644
1645         ph->env.nr_cpus_avail = nr;
1646
1647         ret = readn(fd, &nr, sizeof(nr));
1648         if (ret != sizeof(nr))
1649                 return -1;
1650
1651         if (ph->needs_swap)
1652                 nr = bswap_32(nr);
1653
1654         ph->env.nr_cpus_online = nr;
1655         return 0;
1656 }
1657
1658 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1659                            struct perf_header *ph, int fd,
1660                            void *data __maybe_unused)
1661 {
1662         ph->env.cpu_desc = do_read_string(fd, ph);
1663         return ph->env.cpu_desc ? 0 : -ENOMEM;
1664 }
1665
1666 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1667                          struct perf_header *ph,  int fd,
1668                          void *data __maybe_unused)
1669 {
1670         ph->env.cpuid = do_read_string(fd, ph);
1671         return ph->env.cpuid ? 0 : -ENOMEM;
1672 }
1673
1674 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1675                              struct perf_header *ph, int fd,
1676                              void *data __maybe_unused)
1677 {
1678         uint64_t mem;
1679         ssize_t ret;
1680
1681         ret = readn(fd, &mem, sizeof(mem));
1682         if (ret != sizeof(mem))
1683                 return -1;
1684
1685         if (ph->needs_swap)
1686                 mem = bswap_64(mem);
1687
1688         ph->env.total_mem = mem;
1689         return 0;
1690 }
1691
1692 static struct perf_evsel *
1693 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1694 {
1695         struct perf_evsel *evsel;
1696
1697         evlist__for_each_entry(evlist, evsel) {
1698                 if (evsel->idx == idx)
1699                         return evsel;
1700         }
1701
1702         return NULL;
1703 }
1704
1705 static void
1706 perf_evlist__set_event_name(struct perf_evlist *evlist,
1707                             struct perf_evsel *event)
1708 {
1709         struct perf_evsel *evsel;
1710
1711         if (!event->name)
1712                 return;
1713
1714         evsel = perf_evlist__find_by_index(evlist, event->idx);
1715         if (!evsel)
1716                 return;
1717
1718         if (evsel->name)
1719                 return;
1720
1721         evsel->name = strdup(event->name);
1722 }
1723
1724 static int
1725 process_event_desc(struct perf_file_section *section __maybe_unused,
1726                    struct perf_header *header, int fd,
1727                    void *data __maybe_unused)
1728 {
1729         struct perf_session *session;
1730         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1731
1732         if (!events)
1733                 return 0;
1734
1735         session = container_of(header, struct perf_session, header);
1736         for (evsel = events; evsel->attr.size; evsel++)
1737                 perf_evlist__set_event_name(session->evlist, evsel);
1738
1739         free_event_desc(events);
1740
1741         return 0;
1742 }
1743
1744 static int process_cmdline(struct perf_file_section *section,
1745                            struct perf_header *ph, int fd,
1746                            void *data __maybe_unused)
1747 {
1748         ssize_t ret;
1749         char *str, *cmdline = NULL, **argv = NULL;
1750         u32 nr, i, len = 0;
1751
1752         ret = readn(fd, &nr, sizeof(nr));
1753         if (ret != sizeof(nr))
1754                 return -1;
1755
1756         if (ph->needs_swap)
1757                 nr = bswap_32(nr);
1758
1759         ph->env.nr_cmdline = nr;
1760
1761         cmdline = zalloc(section->size + nr + 1);
1762         if (!cmdline)
1763                 return -1;
1764
1765         argv = zalloc(sizeof(char *) * (nr + 1));
1766         if (!argv)
1767                 goto error;
1768
1769         for (i = 0; i < nr; i++) {
1770                 str = do_read_string(fd, ph);
1771                 if (!str)
1772                         goto error;
1773
1774                 argv[i] = cmdline + len;
1775                 memcpy(argv[i], str, strlen(str) + 1);
1776                 len += strlen(str) + 1;
1777                 free(str);
1778         }
1779         ph->env.cmdline = cmdline;
1780         ph->env.cmdline_argv = (const char **) argv;
1781         return 0;
1782
1783 error:
1784         free(argv);
1785         free(cmdline);
1786         return -1;
1787 }
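
/*
 * Editor's note (illustration, not in the original source): HEADER_CMDLINE is
 * consumed above as a u32 argument count followed by one length-prefixed,
 * padded string per argument (see do_read_string()).  For a hypothetical
 * "perf record -a sleep 1" session the result would look like:
 *
 *   env.cmdline      = "perf\0" "record\0" "-a\0" "sleep\0" "1\0"
 *   env.cmdline_argv = { "perf", "record", "-a", "sleep", "1", NULL }
 *
 * with every argv[] pointer aiming into the single packed cmdline buffer and
 * the array NULL-terminated thanks to the nr + 1 zalloc().
 */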
1788
1789 static int process_cpu_topology(struct perf_file_section *section,
1790                                 struct perf_header *ph, int fd,
1791                                 void *data __maybe_unused)
1792 {
1793         ssize_t ret;
1794         u32 nr, i;
1795         char *str;
1796         struct strbuf sb;
1797         int cpu_nr = ph->env.nr_cpus_avail;
1798         u64 size = 0;
1799
1800         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1801         if (!ph->env.cpu)
1802                 return -1;
1803
1804         ret = readn(fd, &nr, sizeof(nr));
1805         if (ret != sizeof(nr))
1806                 goto free_cpu;
1807
1808         if (ph->needs_swap)
1809                 nr = bswap_32(nr);
1810
1811         ph->env.nr_sibling_cores = nr;
1812         size += sizeof(u32);
1813         if (strbuf_init(&sb, 128) < 0)
1814                 goto free_cpu;
1815
1816         for (i = 0; i < nr; i++) {
1817                 str = do_read_string(fd, ph);
1818                 if (!str)
1819                         goto error;
1820
1821                 /* include a NULL character at the end */
1822                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1823                         goto error;
1824                 size += string_size(str);
1825                 free(str);
1826         }
1827         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1828
1829         ret = readn(fd, &nr, sizeof(nr));
1830         if (ret != sizeof(nr))
1831                 goto error;
1832
1833         if (ph->needs_swap)
1834                 nr = bswap_32(nr);
1835
1836         ph->env.nr_sibling_threads = nr;
1837         size += sizeof(u32);
1838
1839         for (i = 0; i < nr; i++) {
1840                 str = do_read_string(fd, ph);
1841                 if (!str)
1842                         goto error;
1843
1844                 /* include a NULL character at the end */
1845                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1846                         goto error;
1847                 size += string_size(str);
1848                 free(str);
1849         }
1850         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1851
1852         /*
1853          * The header may have been generated by an old perf tool,
1854          * which doesn't include core id and socket id information.
1855          */
1856         if (section->size <= size) {
1857                 zfree(&ph->env.cpu);
1858                 return 0;
1859         }
1860
1861         for (i = 0; i < (u32)cpu_nr; i++) {
1862                 ret = readn(fd, &nr, sizeof(nr));
1863                 if (ret != sizeof(nr))
1864                         goto free_cpu;
1865
1866                 if (ph->needs_swap)
1867                         nr = bswap_32(nr);
1868
1869                 ph->env.cpu[i].core_id = nr;
1870
1871                 ret = readn(fd, &nr, sizeof(nr));
1872                 if (ret != sizeof(nr))
1873                         goto free_cpu;
1874
1875                 if (ph->needs_swap)
1876                         nr = bswap_32(nr);
1877
1878                 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1879                         pr_debug("socket_id number is too big. "
1880                                  "You may need to upgrade the perf tool.\n");
1881                         goto free_cpu;
1882                 }
1883
1884                 ph->env.cpu[i].socket_id = nr;
1885         }
1886
1887         return 0;
1888
1889 error:
1890         strbuf_release(&sb);
1891 free_cpu:
1892         zfree(&ph->env.cpu);
1893         return -1;
1894 }
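
/*
 * Editor's note (illustration, not in the original source): the
 * HEADER_CPU_TOPOLOGY section parsed above is laid out as:
 *
 *   u32 nr_sibling_cores,   then that many padded strings (sibling CPU lists,
 *                           e.g. "0-3"), concatenated into env.sibling_cores;
 *   u32 nr_sibling_threads, then that many padded strings, concatenated into
 *                           env.sibling_threads;
 *   and, for headers written by newer perf tools, one (u32 core_id,
 *   u32 socket_id) pair per available CPU, stored in env.cpu[].
 *
 * The section->size <= size check is what detects the older layout that
 * stops after the sibling lists.
 */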
1895
1896 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1897                                  struct perf_header *ph, int fd,
1898                                  void *data __maybe_unused)
1899 {
1900         struct numa_node *nodes, *n;
1901         ssize_t ret;
1902         u32 nr, i;
1903         char *str;
1904
1905         /* nr nodes */
1906         ret = readn(fd, &nr, sizeof(nr));
1907         if (ret != sizeof(nr))
1908                 return -1;
1909
1910         if (ph->needs_swap)
1911                 nr = bswap_32(nr);
1912
1913         nodes = zalloc(sizeof(*nodes) * nr);
1914         if (!nodes)
1915                 return -ENOMEM;
1916
1917         for (i = 0; i < nr; i++) {
1918                 n = &nodes[i];
1919
1920                 /* node number */
1921                 ret = readn(fd, &n->node, sizeof(u32));
1922                 if (ret != sizeof(n->node))
1923                         goto error;
1924
1925                 ret = readn(fd, &n->mem_total, sizeof(u64));
1926                 if (ret != sizeof(u64))
1927                         goto error;
1928
1929                 ret = readn(fd, &n->mem_free, sizeof(u64));
1930                 if (ret != sizeof(u64))
1931                         goto error;
1932
1933                 if (ph->needs_swap) {
1934                         n->node      = bswap_32(n->node);
1935                         n->mem_total = bswap_64(n->mem_total);
1936                         n->mem_free  = bswap_64(n->mem_free);
1937                 }
1938
1939                 str = do_read_string(fd, ph);
1940                 if (!str)
1941                         goto error;
1942
1943                 n->map = cpu_map__new(str);
1944                 if (!n->map)
1945                         goto error;
1946
1947                 free(str);
1948         }
1949         ph->env.nr_numa_nodes = nr;
1950         ph->env.numa_nodes = nodes;
1951         return 0;
1952
1953 error:
1954         free(nodes);
1955         return -1;
1956 }
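
/*
 * Editor's note (illustration, not in the original source): each of the nr
 * HEADER_NUMA_TOPOLOGY entries read above consists of a u32 node number, two
 * u64 memory figures (total and free), and a padded string holding the node's
 * CPU list (for example "0-7,16-23"), which cpu_map__new() turns into the
 * node's struct cpu_map.
 */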
1957
1958 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1959                                 struct perf_header *ph, int fd,
1960                                 void *data __maybe_unused)
1961 {
1962         ssize_t ret;
1963         char *name;
1964         u32 pmu_num;
1965         u32 type;
1966         struct strbuf sb;
1967
1968         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1969         if (ret != sizeof(pmu_num))
1970                 return -1;
1971
1972         if (ph->needs_swap)
1973                 pmu_num = bswap_32(pmu_num);
1974
1975         if (!pmu_num) {
1976                 pr_debug("pmu mappings not available\n");
1977                 return 0;
1978         }
1979
1980         ph->env.nr_pmu_mappings = pmu_num;
1981         if (strbuf_init(&sb, 128) < 0)
1982                 return -1;
1983
1984         while (pmu_num) {
1985                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1986                         goto error;
1987                 if (ph->needs_swap)
1988                         type = bswap_32(type);
1989
1990                 name = do_read_string(fd, ph);
1991                 if (!name)
1992                         goto error;
1993
1994                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1995                         goto error;
1996                 /* include a NULL character at the end */
1997                 if (strbuf_add(&sb, "", 1) < 0)
1998                         goto error;
1999
2000                 if (!strcmp(name, "msr"))
2001                         ph->env.msr_pmu_type = type;
2002
2003                 free(name);
2004                 pmu_num--;
2005         }
2006         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2007         return 0;
2008
2009 error:
2010         strbuf_release(&sb);
2011         return -1;
2012 }
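
/*
 * Editor's note (illustration, not in the original source): env.pmu_mappings
 * ends up as a sequence of NUL-terminated "type:name" entries, e.g.
 * "4:cpu\0" "7:msr\0" (the numeric types are just examples and vary per
 * machine).  The "msr" entry, if present, is also remembered separately in
 * env.msr_pmu_type.
 */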
2013
2014 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2015                               struct perf_header *ph, int fd,
2016                               void *data __maybe_unused)
2017 {
2018         int ret = -1;
2019         u32 i, nr, nr_groups;
2020         struct perf_session *session;
2021         struct perf_evsel *evsel, *leader = NULL;
2022         struct group_desc {
2023                 char *name;
2024                 u32 leader_idx;
2025                 u32 nr_members;
2026         } *desc;
2027
2028         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2029                 return -1;
2030
2031         if (ph->needs_swap)
2032                 nr_groups = bswap_32(nr_groups);
2033
2034         ph->env.nr_groups = nr_groups;
2035         if (!nr_groups) {
2036                 pr_debug("group desc not available\n");
2037                 return 0;
2038         }
2039
2040         desc = calloc(nr_groups, sizeof(*desc));
2041         if (!desc)
2042                 return -1;
2043
2044         for (i = 0; i < nr_groups; i++) {
2045                 desc[i].name = do_read_string(fd, ph);
2046                 if (!desc[i].name)
2047                         goto out_free;
2048
2049                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2050                         goto out_free;
2051
2052                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2053                         goto out_free;
2054
2055                 if (ph->needs_swap) {
2056                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2057                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2058                 }
2059         }
2060
2061         /*
2062          * Rebuild group relationship based on the group_desc
2063          */
2064         session = container_of(ph, struct perf_session, header);
2065         session->evlist->nr_groups = nr_groups;
2066
2067         i = nr = 0;
2068         evlist__for_each_entry(session->evlist, evsel) {
2069                 if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
2070                         evsel->leader = evsel;
2071                         /* {anon_group} is a dummy name */
2072                         if (strcmp(desc[i].name, "{anon_group}")) {
2073                                 evsel->group_name = desc[i].name;
2074                                 desc[i].name = NULL;
2075                         }
2076                         evsel->nr_members = desc[i].nr_members;
2077
2078                         if (i >= nr_groups || nr > 0) {
2079                                 pr_debug("invalid group desc\n");
2080                                 goto out_free;
2081                         }
2082
2083                         leader = evsel;
2084                         nr = evsel->nr_members - 1;
2085                         i++;
2086                 } else if (nr) {
2087                         /* This is a group member */
2088                         evsel->leader = leader;
2089
2090                         nr--;
2091                 }
2092         }
2093
2094         if (i != nr_groups || nr != 0) {
2095                 pr_debug("invalid group desc\n");
2096                 goto out_free;
2097         }
2098
2099         ret = 0;
2100 out_free:
2101         for (i = 0; i < nr_groups; i++)
2102                 zfree(&desc[i].name);
2103         free(desc);
2104
2105         return ret;
2106 }
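
/*
 * Editor's note (worked example, not in the original source): with a single
 * group_desc of { name = "{cycles,instructions}", leader_idx = 0,
 * nr_members = 2 }, the walk above makes the evsel with idx 0 its own leader,
 * hands it the group name and nr_members, then consumes the next evsel as a
 * member pointing at that leader; the final i == nr_groups && nr == 0 check
 * confirms every described group was fully matched.
 */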
2107
2108 static int process_auxtrace(struct perf_file_section *section,
2109                             struct perf_header *ph, int fd,
2110                             void *data __maybe_unused)
2111 {
2112         struct perf_session *session;
2113         int err;
2114
2115         session = container_of(ph, struct perf_session, header);
2116
2117         err = auxtrace_index__process(fd, section->size, session,
2118                                       ph->needs_swap);
2119         if (err < 0)
2120                 pr_err("Failed to process auxtrace index\n");
2121         return err;
2122 }
2123
2124 static int process_cache(struct perf_file_section *section __maybe_unused,
2125                          struct perf_header *ph, int fd,
2126                          void *data __maybe_unused)
2127 {
2128         struct cpu_cache_level *caches;
2129         u32 cnt, i, version;
2130
2131         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2132                 return -1;
2133
2134         if (ph->needs_swap)
2135                 version = bswap_32(version);
2136
2137         if (version != 1)
2138                 return -1;
2139
2140         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2141                 return -1;
2142
2143         if (ph->needs_swap)
2144                 cnt = bswap_32(cnt);
2145
2146         caches = zalloc(sizeof(*caches) * cnt);
2147         if (!caches)
2148                 return -1;
2149
2150         for (i = 0; i < cnt; i++) {
2151                 struct cpu_cache_level c;
2152
2153                 #define _R(v)                                           \
2154                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2155                                 goto out_free_caches;                   \
2156                         if (ph->needs_swap)                             \
2157                                 c.v = bswap_32(c.v);                    \
2158
2159                 _R(level)
2160                 _R(line_size)
2161                 _R(sets)
2162                 _R(ways)
2163                 #undef _R
2164
2165                 #define _R(v)                           \
2166                         c.v = do_read_string(fd, ph);   \
2167                         if (!c.v)                       \
2168                                 goto out_free_caches;
2169
2170                 _R(type)
2171                 _R(size)
2172                 _R(map)
2173                 #undef _R
2174
2175                 caches[i] = c;
2176         }
2177
2178         ph->env.caches = caches;
2179         ph->env.caches_cnt = cnt;
2180         return 0;
2181 out_free_caches:
2182         free(caches);
2183         return -1;
2184 }
2185
2186 struct feature_ops {
2187         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2188         void (*print)(struct perf_header *h, int fd, FILE *fp);
2189         int (*process)(struct perf_file_section *section,
2190                        struct perf_header *h, int fd, void *data);
2191         const char *name;
2192         bool full_only;
2193 };
2194
2195 #define FEAT_OPA(n, func) \
2196         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2197 #define FEAT_OPP(n, func) \
2198         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2199                 .process = process_##func }
2200 #define FEAT_OPF(n, func) \
2201         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2202                 .process = process_##func, .full_only = true }
2203
2204 /* feature_ops not implemented: */
2205 #define print_tracing_data      NULL
2206 #define print_build_id          NULL
2207
2208 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2209         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2210         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2211         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2212         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2213         FEAT_OPP(HEADER_VERSION,        version),
2214         FEAT_OPP(HEADER_ARCH,           arch),
2215         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2216         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2217         FEAT_OPP(HEADER_CPUID,          cpuid),
2218         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2219         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2220         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2221         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2222         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2223         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2224         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2225         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2226         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2227         FEAT_OPA(HEADER_STAT,           stat),
2228         FEAT_OPF(HEADER_CACHE,          cache),
2229 };
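
/*
 * Editor's note (illustration, not in the original source): the designated
 * initializers above expand as one would expect, e.g.
 * FEAT_OPP(HEADER_HOSTNAME, hostname) becomes
 *
 *   [HEADER_HOSTNAME] = { .name = "HEADER_HOSTNAME",
 *                         .write = write_hostname,
 *                         .print = print_hostname,
 *                         .process = process_hostname },
 *
 * while FEAT_OPA omits .process and FEAT_OPF additionally sets
 * .full_only = true (printed only with the extended -I listing).
 */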
2230
2231 struct header_print_data {
2232         FILE *fp;
2233         bool full; /* extended list of headers */
2234 };
2235
2236 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2237                                            struct perf_header *ph,
2238                                            int feat, int fd, void *data)
2239 {
2240         struct header_print_data *hd = data;
2241
2242         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2243                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2244                                 "%d, continuing...\n", section->offset, feat);
2245                 return 0;
2246         }
2247         if (feat >= HEADER_LAST_FEATURE) {
2248                 pr_warning("unknown feature %d\n", feat);
2249                 return 0;
2250         }
2251         if (!feat_ops[feat].print)
2252                 return 0;
2253
2254         if (!feat_ops[feat].full_only || hd->full)
2255                 feat_ops[feat].print(ph, fd, hd->fp);
2256         else
2257                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2258                         feat_ops[feat].name);
2259
2260         return 0;
2261 }
2262
2263 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2264 {
2265         struct header_print_data hd;
2266         struct perf_header *header = &session->header;
2267         int fd = perf_data_file__fd(session->file);
2268         struct stat st;
2269         int ret, bit;
2270
2271         hd.fp = fp;
2272         hd.full = full;
2273
2274         ret = fstat(fd, &st);
2275         if (ret == -1)
2276                 return -1;
2277
2278         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2279
2280         perf_header__process_sections(header, fd, &hd,
2281                                       perf_file_section__fprintf_info);
2282
2283         if (session->file->is_pipe)
2284                 return 0;
2285
2286         fprintf(fp, "# missing features: ");
2287         for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2288                 if (bit)
2289                         fprintf(fp, "%s ", feat_ops[bit].name);
2290         }
2291
2292         fprintf(fp, "\n");
2293         return 0;
2294 }
2295
2296 static int do_write_feat(int fd, struct perf_header *h, int type,
2297                          struct perf_file_section **p,
2298                          struct perf_evlist *evlist)
2299 {
2300         int err;
2301         int ret = 0;
2302
2303         if (perf_header__has_feat(h, type)) {
2304                 if (!feat_ops[type].write)
2305                         return -1;
2306
2307                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2308
2309                 err = feat_ops[type].write(fd, h, evlist);
2310                 if (err < 0) {
2311                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2312
2313                         /* undo anything written */
2314                         lseek(fd, (*p)->offset, SEEK_SET);
2315
2316                         return -1;
2317                 }
2318                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2319                 (*p)++;
2320         }
2321         return ret;
2322 }
2323
2324 static int perf_header__adds_write(struct perf_header *header,
2325                                    struct perf_evlist *evlist, int fd)
2326 {
2327         int nr_sections;
2328         struct perf_file_section *feat_sec, *p;
2329         int sec_size;
2330         u64 sec_start;
2331         int feat;
2332         int err;
2333
2334         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2335         if (!nr_sections)
2336                 return 0;
2337
2338         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2339         if (feat_sec == NULL)
2340                 return -ENOMEM;
2341
2342         sec_size = sizeof(*feat_sec) * nr_sections;
2343
2344         sec_start = header->feat_offset;
2345         lseek(fd, sec_start + sec_size, SEEK_SET);
2346
2347         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2348                 if (do_write_feat(fd, header, feat, &p, evlist))
2349                         perf_header__clear_feat(header, feat);
2350         }
2351
2352         lseek(fd, sec_start, SEEK_SET);
2353         /*
2354          * may write more than needed due to dropped features, but
2355          * this is okay, the reader will skip the missing entries
2356          */
2357         err = do_write(fd, feat_sec, sec_size);
2358         if (err < 0)
2359                 pr_debug("failed to write feature section\n");
2360         free(feat_sec);
2361         return err;
2362 }
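
/*
 * Editor's note (illustration, not in the original source): feature data is
 * written in two passes.  perf_header__adds_write() first seeks past the room
 * reserved for the perf_file_section table at header->feat_offset, lets
 * do_write_feat() append each enabled feature's payload while recording its
 * offset and size into the table, and finally seeks back to feat_offset to
 * write the now-filled table itself.
 */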
2363
2364 int perf_header__write_pipe(int fd)
2365 {
2366         struct perf_pipe_file_header f_header;
2367         int err;
2368
2369         f_header = (struct perf_pipe_file_header){
2370                 .magic     = PERF_MAGIC,
2371                 .size      = sizeof(f_header),
2372         };
2373
2374         err = do_write(fd, &f_header, sizeof(f_header));
2375         if (err < 0) {
2376                 pr_debug("failed to write perf pipe header\n");
2377                 return err;
2378         }
2379
2380         return 0;
2381 }
2382
2383 int perf_session__write_header(struct perf_session *session,
2384                                struct perf_evlist *evlist,
2385                                int fd, bool at_exit)
2386 {
2387         struct perf_file_header f_header;
2388         struct perf_file_attr   f_attr;
2389         struct perf_header *header = &session->header;
2390         struct perf_evsel *evsel;
2391         u64 attr_offset;
2392         int err;
2393
2394         lseek(fd, sizeof(f_header), SEEK_SET);
2395
2396         evlist__for_each_entry(session->evlist, evsel) {
2397                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2398                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2399                 if (err < 0) {
2400                         pr_debug("failed to write perf header\n");
2401                         return err;
2402                 }
2403         }
2404
2405         attr_offset = lseek(fd, 0, SEEK_CUR);
2406
2407         evlist__for_each_entry(evlist, evsel) {
2408                 f_attr = (struct perf_file_attr){
2409                         .attr = evsel->attr,
2410                         .ids  = {
2411                                 .offset = evsel->id_offset,
2412                                 .size   = evsel->ids * sizeof(u64),
2413                         }
2414                 };
2415                 err = do_write(fd, &f_attr, sizeof(f_attr));
2416                 if (err < 0) {
2417                         pr_debug("failed to write perf header attribute\n");
2418                         return err;
2419                 }
2420         }
2421
2422         if (!header->data_offset)
2423                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2424         header->feat_offset = header->data_offset + header->data_size;
2425
2426         if (at_exit) {
2427                 err = perf_header__adds_write(header, evlist, fd);
2428                 if (err < 0)
2429                         return err;
2430         }
2431
2432         f_header = (struct perf_file_header){
2433                 .magic     = PERF_MAGIC,
2434                 .size      = sizeof(f_header),
2435                 .attr_size = sizeof(f_attr),
2436                 .attrs = {
2437                         .offset = attr_offset,
2438                         .size   = evlist->nr_entries * sizeof(f_attr),
2439                 },
2440                 .data = {
2441                         .offset = header->data_offset,
2442                         .size   = header->data_size,
2443                 },
2444                 /* event_types is ignored, store zeros */
2445         };
2446
2447         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2448
2449         lseek(fd, 0, SEEK_SET);
2450         err = do_write(fd, &f_header, sizeof(f_header));
2451         if (err < 0) {
2452                 pr_debug("failed to write perf header\n");
2453                 return err;
2454         }
2455         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2456
2457         return 0;
2458 }
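
/*
 * Editor's note (illustration, not in the original source): the resulting
 * perf.data layout written above is roughly
 *
 *   +------------------------+  offset 0
 *   | perf_file_header       |  (written last, once all offsets are known)
 *   +------------------------+
 *   | per-evsel sample ids   |  u64 arrays referenced by f_attr.ids
 *   +------------------------+  f_header.attrs.offset
 *   | perf_file_attr[]       |  one entry per evsel
 *   +------------------------+  header->data_offset
 *   | sample data            |  header->data_size bytes
 *   +------------------------+  header->feat_offset
 *   | feature sections       |  written at exit by perf_header__adds_write()
 *   +------------------------+
 */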
2459
2460 static int perf_header__getbuffer64(struct perf_header *header,
2461                                     int fd, void *buf, size_t size)
2462 {
2463         if (readn(fd, buf, size) <= 0)
2464                 return -1;
2465
2466         if (header->needs_swap)
2467                 mem_bswap_64(buf, size);
2468
2469         return 0;
2470 }
2471
2472 int perf_header__process_sections(struct perf_header *header, int fd,
2473                                   void *data,
2474                                   int (*process)(struct perf_file_section *section,
2475                                                  struct perf_header *ph,
2476                                                  int feat, int fd, void *data))
2477 {
2478         struct perf_file_section *feat_sec, *sec;
2479         int nr_sections;
2480         int sec_size;
2481         int feat;
2482         int err;
2483
2484         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2485         if (!nr_sections)
2486                 return 0;
2487
2488         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2489         if (!feat_sec)
2490                 return -1;
2491
2492         sec_size = sizeof(*feat_sec) * nr_sections;
2493
2494         lseek(fd, header->feat_offset, SEEK_SET);
2495
2496         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2497         if (err < 0)
2498                 goto out_free;
2499
2500         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2501                 err = process(sec++, header, feat, fd, data);
2502                 if (err < 0)
2503                         goto out_free;
2504         }
2505         err = 0;
2506 out_free:
2507         free(feat_sec);
2508         return err;
2509 }
2510
2511 static const int attr_file_abi_sizes[] = {
2512         [0] = PERF_ATTR_SIZE_VER0,
2513         [1] = PERF_ATTR_SIZE_VER1,
2514         [2] = PERF_ATTR_SIZE_VER2,
2515         [3] = PERF_ATTR_SIZE_VER3,
2516         [4] = PERF_ATTR_SIZE_VER4,
2517         0,
2518 };
2519
2520 /*
2521  * In the legacy file format, the magic number is not used to encode endianness;
2522  * hdr_sz was used for that instead. But given that hdr_sz can vary based
2523  * on ABI revisions, we need to try all known ABI sizes in both byte orders
2524  * to detect the endianness.
2525  */
2526 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2527 {
2528         uint64_t ref_size, attr_size;
2529         int i;
2530
2531         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2532                 ref_size = attr_file_abi_sizes[i]
2533                          + sizeof(struct perf_file_section);
2534                 if (hdr_sz != ref_size) {
2535                         attr_size = bswap_64(hdr_sz);
2536                         if (attr_size != ref_size)
2537                                 continue;
2538
2539                         ph->needs_swap = true;
2540                 }
2541                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2542                          i,
2543                          ph->needs_swap);
2544                 return 0;
2545         }
2546         /* could not determine endianness */
2547         return -1;
2548 }
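
/*
 * Editor's note (worked example, not in the original source): assuming
 * PERF_ATTR_SIZE_VER0 is 64 bytes and struct perf_file_section is two u64s,
 * an ABI0 header stores attr_size == 64 + 16 == 80.  A reader on a host with
 * the same byte order sees 80 directly; a reader on the opposite byte order
 * sees bswap_64(80) and only matches after swapping, which is what sets
 * ph->needs_swap above.
 */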
2549
2550 #define PERF_PIPE_HDR_VER0      16
2551
2552 static const size_t attr_pipe_abi_sizes[] = {
2553         [0] = PERF_PIPE_HDR_VER0,
2554         0,
2555 };
2556
2557 /*
2558  * In the legacy pipe format, there is an implicit assumption that the endianness
2559  * of the host recording the samples and the host parsing the samples is the
2560  * same. This is not always the case, given that the pipe output may be
2561  * redirected into a file and analyzed on a different machine with possibly a
2562  * different endianness and different perf_event ABI revisions in the perf tool itself.
2563  */
2564 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2565 {
2566         u64 attr_size;
2567         int i;
2568
2569         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2570                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2571                         attr_size = bswap_64(hdr_sz);
2572                         if (attr_size != hdr_sz)
2573                                 continue;
2574
2575                         ph->needs_swap = true;
2576                 }
2577                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2578                 return 0;
2579         }
2580         return -1;
2581 }
2582
2583 bool is_perf_magic(u64 magic)
2584 {
2585         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2586                 || magic == __perf_magic2
2587                 || magic == __perf_magic2_sw)
2588                 return true;
2589
2590         return false;
2591 }
2592
2593 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2594                               bool is_pipe, struct perf_header *ph)
2595 {
2596         int ret;
2597
2598         /* check for legacy format */
2599         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2600         if (ret == 0) {
2601                 ph->version = PERF_HEADER_VERSION_1;
2602                 pr_debug("legacy perf.data format\n");
2603                 if (is_pipe)
2604                         return try_all_pipe_abis(hdr_sz, ph);
2605
2606                 return try_all_file_abis(hdr_sz, ph);
2607         }
2608         /*
2609          * the new magic number serves two purposes:
2610          * - unique number to identify actual perf.data files
2611          * - encode endianness of file
2612          */
2613         ph->version = PERF_HEADER_VERSION_2;
2614
2615         /* check magic number with one endianness */
2616         if (magic == __perf_magic2)
2617                 return 0;
2618
2619         /* check magic number with opposite endianness */
2620         if (magic != __perf_magic2_sw)
2621                 return -1;
2622
2623         ph->needs_swap = true;
2624
2625         return 0;
2626 }
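
/*
 * Editor's note (illustrative sketch, not part of the original file): the v2
 * magic works because the literal bytes "PERFILE2" read into a u64 yield
 * __perf_magic2 (0x32454c4946524550) on a little-endian host and
 * __perf_magic2_sw (0x50455246494c4532) on a big-endian one.  The helper
 * below merely restates the two comparisons check_magic_endian() performs
 * for version 2 files.
 */
static inline int magic2_classify(u64 magic, bool *needs_swap)
{
	if (magic == __perf_magic2) {		/* same byte order as the writer */
		*needs_swap = false;
		return 0;
	}
	if (magic == __perf_magic2_sw) {	/* opposite byte order */
		*needs_swap = true;
		return 0;
	}
	return -1;				/* not a v2 perf.data magic */
}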
2627
2628 int perf_file_header__read(struct perf_file_header *header,
2629                            struct perf_header *ph, int fd)
2630 {
2631         ssize_t ret;
2632
2633         lseek(fd, 0, SEEK_SET);
2634
2635         ret = readn(fd, header, sizeof(*header));
2636         if (ret <= 0)
2637                 return -1;
2638
2639         if (check_magic_endian(header->magic,
2640                                header->attr_size, false, ph) < 0) {
2641                 pr_debug("magic/endian check failed\n");
2642                 return -1;
2643         }
2644
2645         if (ph->needs_swap) {
2646                 mem_bswap_64(header, offsetof(struct perf_file_header,
2647                              adds_features));
2648         }
2649
2650         if (header->size != sizeof(*header)) {
2651                 /* Support the previous format */
2652                 if (header->size == offsetof(typeof(*header), adds_features))
2653                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2654                 else
2655                         return -1;
2656         } else if (ph->needs_swap) {
2657                 /*
2658                  * feature bitmap is declared as an array of unsigned longs --
2659                  * not good since its size can differ between the host that
2660                  * generated the data file and the host analyzing the file.
2661                  *
2662                  * We need to handle endianness, but we don't know the size of
2663                  * the unsigned long where the file was generated. Take a best
2664                  * guess at determining it: try 64-bit swap first (ie., file
2665                  * created on a 64-bit host), and check if the hostname feature
2666                  * bit is set (this feature bit is forced on as of fbe96f2).
2667                  * If the bit is not, undo the 64-bit swap and try a 32-bit
2668                  * swap. If the hostname bit is still not set (e.g., older data
2669                  * file), punt and fallback to the original behavior --
2670                  * clearing all feature bits and setting buildid.
2671                  */
2672                 mem_bswap_64(&header->adds_features,
2673                             BITS_TO_U64(HEADER_FEAT_BITS));
2674
2675                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2676                         /* unswap as u64 */
2677                         mem_bswap_64(&header->adds_features,
2678                                     BITS_TO_U64(HEADER_FEAT_BITS));
2679
2680                         /* unswap as u32 */
2681                         mem_bswap_32(&header->adds_features,
2682                                     BITS_TO_U32(HEADER_FEAT_BITS));
2683                 }
2684
2685                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2686                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2687                         set_bit(HEADER_BUILD_ID, header->adds_features);
2688                 }
2689         }
2690
2691         memcpy(&ph->adds_features, &header->adds_features,
2692                sizeof(ph->adds_features));
2693
2694         ph->data_offset  = header->data.offset;
2695         ph->data_size    = header->data.size;
2696         ph->feat_offset  = header->data.offset + header->data.size;
2697         return 0;
2698 }
2699
2700 static int perf_file_section__process(struct perf_file_section *section,
2701                                       struct perf_header *ph,
2702                                       int feat, int fd, void *data)
2703 {
2704         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2705                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2706                           "%d, continuing...\n", section->offset, feat);
2707                 return 0;
2708         }
2709
2710         if (feat >= HEADER_LAST_FEATURE) {
2711                 pr_debug("unknown feature %d, continuing...\n", feat);
2712                 return 0;
2713         }
2714
2715         if (!feat_ops[feat].process)
2716                 return 0;
2717
2718         return feat_ops[feat].process(section, ph, fd, data);
2719 }
2720
2721 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2722                                        struct perf_header *ph, int fd,
2723                                        bool repipe)
2724 {
2725         ssize_t ret;
2726
2727         ret = readn(fd, header, sizeof(*header));
2728         if (ret <= 0)
2729                 return -1;
2730
2731         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2732                 pr_debug("endian/magic failed\n");
2733                 return -1;
2734         }
2735
2736         if (ph->needs_swap)
2737                 header->size = bswap_64(header->size);
2738
2739         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2740                 return -1;
2741
2742         return 0;
2743 }
2744
2745 static int perf_header__read_pipe(struct perf_session *session)
2746 {
2747         struct perf_header *header = &session->header;
2748         struct perf_pipe_file_header f_header;
2749
2750         if (perf_file_header__read_pipe(&f_header, header,
2751                                         perf_data_file__fd(session->file),
2752                                         session->repipe) < 0) {
2753                 pr_debug("incompatible file format\n");
2754                 return -EINVAL;
2755         }
2756
2757         return 0;
2758 }
2759
2760 static int read_attr(int fd, struct perf_header *ph,
2761                      struct perf_file_attr *f_attr)
2762 {
2763         struct perf_event_attr *attr = &f_attr->attr;
2764         size_t sz, left;
2765         size_t our_sz = sizeof(f_attr->attr);
2766         ssize_t ret;
2767
2768         memset(f_attr, 0, sizeof(*f_attr));
2769
2770         /* read minimal guaranteed structure */
2771         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2772         if (ret <= 0) {
2773                 pr_debug("cannot read %d bytes of header attr\n",
2774                          PERF_ATTR_SIZE_VER0);
2775                 return -1;
2776         }
2777
2778         /* on file perf_event_attr size */
2779         sz = attr->size;
2780
2781         if (ph->needs_swap)
2782                 sz = bswap_32(sz);
2783
2784         if (sz == 0) {
2785                 /* assume ABI0 */
2786                 sz = PERF_ATTR_SIZE_VER0;
2787         } else if (sz > our_sz) {
2788                 pr_debug("file uses a more recent and unsupported ABI"
2789                          " (%zu bytes extra)\n", sz - our_sz);
2790                 return -1;
2791         }
2792         /* what we have not yet read and that we know about */
2793         left = sz - PERF_ATTR_SIZE_VER0;
2794         if (left) {
2795                 void *ptr = attr;
2796                 ptr += PERF_ATTR_SIZE_VER0;
2797
2798                 ret = readn(fd, ptr, left);
2799         }
2800         /* read perf_file_section, ids are read in caller */
2801         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2802
2803         return ret <= 0 ? -1 : 0;
2804 }
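
/*
 * Editor's note (illustration, not in the original source): read_attr() copes
 * with attrs of any known size.  A file from an ABI0 tool stores
 * attr.size == PERF_ATTR_SIZE_VER0, so left == 0 and only the trailing
 * perf_file_section with the sample ids remains to be read; a newer-but-known
 * ABI leaves left > 0 and the extra bytes land in the tail of the attr; an
 * attr.size larger than this build's struct perf_event_attr is rejected as an
 * unsupported ABI.
 */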
2805
2806 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2807                                                 struct pevent *pevent)
2808 {
2809         struct event_format *event;
2810         char bf[128];
2811
2812         /* already prepared */
2813         if (evsel->tp_format)
2814                 return 0;
2815
2816         if (pevent == NULL) {
2817                 pr_debug("broken or missing trace data\n");
2818                 return -1;
2819         }
2820
2821         event = pevent_find_event(pevent, evsel->attr.config);
2822         if (event == NULL) {
2823                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2824                 return -1;
2825         }
2826
2827         if (!evsel->name) {
2828                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2829                 evsel->name = strdup(bf);
2830                 if (evsel->name == NULL)
2831                         return -1;
2832         }
2833
2834         evsel->tp_format = event;
2835         return 0;
2836 }
2837
2838 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2839                                                   struct pevent *pevent)
2840 {
2841         struct perf_evsel *pos;
2842
2843         evlist__for_each_entry(evlist, pos) {
2844                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2845                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2846                         return -1;
2847         }
2848
2849         return 0;
2850 }
2851
2852 int perf_session__read_header(struct perf_session *session)
2853 {
2854         struct perf_data_file *file = session->file;
2855         struct perf_header *header = &session->header;
2856         struct perf_file_header f_header;
2857         struct perf_file_attr   f_attr;
2858         u64                     f_id;
2859         int nr_attrs, nr_ids, i, j;
2860         int fd = perf_data_file__fd(file);
2861
2862         session->evlist = perf_evlist__new();
2863         if (session->evlist == NULL)
2864                 return -ENOMEM;
2865
2866         session->evlist->env = &header->env;
2867         session->machines.host.env = &header->env;
2868         if (perf_data_file__is_pipe(file))
2869                 return perf_header__read_pipe(session);
2870
2871         if (perf_file_header__read(&f_header, header, fd) < 0)
2872                 return -EINVAL;
2873
2874         /*
2875          * Sanity check that perf.data was written cleanly; data size is
2876          * initialized to 0 and updated only if the on_exit function is run.
2877          * If data size is still 0 then the file contains only partial
2878          * information.  Just warn the user and process as much of it as possible.
2879          */
2880         if (f_header.data.size == 0) {
2881                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2882                            "Was the 'perf record' command properly terminated?\n",
2883                            file->path);
2884         }
2885
2886         nr_attrs = f_header.attrs.size / f_header.attr_size;
2887         lseek(fd, f_header.attrs.offset, SEEK_SET);
2888
2889         for (i = 0; i < nr_attrs; i++) {
2890                 struct perf_evsel *evsel;
2891                 off_t tmp;
2892
2893                 if (read_attr(fd, header, &f_attr) < 0)
2894                         goto out_errno;
2895
2896                 if (header->needs_swap) {
2897                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2898                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2899                         perf_event__attr_swap(&f_attr.attr);
2900                 }
2901
2902                 tmp = lseek(fd, 0, SEEK_CUR);
2903                 evsel = perf_evsel__new(&f_attr.attr);
2904
2905                 if (evsel == NULL)
2906                         goto out_delete_evlist;
2907
2908                 evsel->needs_swap = header->needs_swap;
2909                 /*
2910                  * Do it before so that if perf_evsel__alloc_id fails, this
2911                  * entry gets purged too at perf_evlist__delete().
2912                  */
2913                 perf_evlist__add(session->evlist, evsel);
2914
2915                 nr_ids = f_attr.ids.size / sizeof(u64);
2916                 /*
2917                  * We don't have the cpu and thread maps in the header, so
2918                  * for allocating the perf_sample_id table we fake 1 cpu and
2919                  * nr_ids threads.
2920                  */
2921                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2922                         goto out_delete_evlist;
2923
2924                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2925
2926                 for (j = 0; j < nr_ids; j++) {
2927                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2928                                 goto out_errno;
2929
2930                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2931                 }
2932
2933                 lseek(fd, tmp, SEEK_SET);
2934         }
2935
2936         symbol_conf.nr_events = nr_attrs;
2937
2938         perf_header__process_sections(header, fd, &session->tevent,
2939                                       perf_file_section__process);
2940
2941         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2942                                                    session->tevent.pevent))
2943                 goto out_delete_evlist;
2944
2945         return 0;
2946 out_errno:
2947         return -errno;
2948
2949 out_delete_evlist:
2950         perf_evlist__delete(session->evlist);
2951         session->evlist = NULL;
2952         return -ENOMEM;
2953 }
2954
2955 int perf_event__synthesize_attr(struct perf_tool *tool,
2956                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2957                                 perf_event__handler_t process)
2958 {
2959         union perf_event *ev;
2960         size_t size;
2961         int err;
2962
2963         size = sizeof(struct perf_event_attr);
2964         size = PERF_ALIGN(size, sizeof(u64));
2965         size += sizeof(struct perf_event_header);
2966         size += ids * sizeof(u64);
2967
2968         ev = malloc(size);
2969
2970         if (ev == NULL)
2971                 return -ENOMEM;
2972
2973         ev->attr.attr = *attr;
2974         memcpy(ev->attr.id, id, ids * sizeof(u64));
2975
2976         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2977         ev->attr.header.size = (u16)size;
2978
2979         if (ev->attr.header.size == size)
2980                 err = process(tool, ev, NULL, NULL);
2981         else
2982                 err = -E2BIG;
2983
2984         free(ev);
2985
2986         return err;
2987 }
2988
2989 static struct event_update_event *
2990 event_update_event__new(size_t size, u64 type, u64 id)
2991 {
2992         struct event_update_event *ev;
2993
2994         size += sizeof(*ev);
2995         size  = PERF_ALIGN(size, sizeof(u64));
2996
2997         ev = zalloc(size);
2998         if (ev) {
2999                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3000                 ev->header.size = (u16)size;
3001                 ev->type = type;
3002                 ev->id = id;
3003         }
3004         return ev;
3005 }
3006
3007 int
3008 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3009                                          struct perf_evsel *evsel,
3010                                          perf_event__handler_t process)
3011 {
3012         struct event_update_event *ev;
3013         size_t size = strlen(evsel->unit);
3014         int err;
3015
3016         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3017         if (ev == NULL)
3018                 return -ENOMEM;
3019
3020         strncpy(ev->data, evsel->unit, size);
3021         err = process(tool, (union perf_event *)ev, NULL, NULL);
3022         free(ev);
3023         return err;
3024 }
3025
3026 int
3027 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3028                                           struct perf_evsel *evsel,
3029                                           perf_event__handler_t process)
3030 {
3031         struct event_update_event *ev;
3032         struct event_update_event_scale *ev_data;
3033         int err;
3034
3035         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3036         if (ev == NULL)
3037                 return -ENOMEM;
3038
3039         ev_data = (struct event_update_event_scale *) ev->data;
3040         ev_data->scale = evsel->scale;
3041         err = process(tool, (union perf_event*) ev, NULL, NULL);
3042         free(ev);
3043         return err;
3044 }
3045
3046 int
3047 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3048                                          struct perf_evsel *evsel,
3049                                          perf_event__handler_t process)
3050 {
3051         struct event_update_event *ev;
3052         size_t len = strlen(evsel->name);
3053         int err;
3054
3055         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3056         if (ev == NULL)
3057                 return -ENOMEM;
3058
3059         strncpy(ev->data, evsel->name, len);
3060         err = process(tool, (union perf_event*) ev, NULL, NULL);
3061         free(ev);
3062         return err;
3063 }
3064
3065 int
3066 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3067                                         struct perf_evsel *evsel,
3068                                         perf_event__handler_t process)
3069 {
3070         size_t size = sizeof(struct event_update_event);
3071         struct event_update_event *ev;
3072         int max, err;
3073         u16 type;
3074
3075         if (!evsel->own_cpus)
3076                 return 0;
3077
3078         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3079         if (!ev)
3080                 return -ENOMEM;
3081
3082         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3083         ev->header.size = (u16)size;
3084         ev->type = PERF_EVENT_UPDATE__CPUS;
3085         ev->id   = evsel->id[0];
3086
3087         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3088                                  evsel->own_cpus,
3089                                  type, max);
3090
3091         err = process(tool, (union perf_event*) ev, NULL, NULL);
3092         free(ev);
3093         return err;
3094 }
3095
3096 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3097 {
3098         struct event_update_event *ev = &event->event_update;
3099         struct event_update_event_scale *ev_scale;
3100         struct event_update_event_cpus *ev_cpus;
3101         struct cpu_map *map;
3102         size_t ret;
3103
3104         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3105
3106         switch (ev->type) {
3107         case PERF_EVENT_UPDATE__SCALE:
3108                 ev_scale = (struct event_update_event_scale *) ev->data;
3109                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3110                 break;
3111         case PERF_EVENT_UPDATE__UNIT:
3112                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3113                 break;
3114         case PERF_EVENT_UPDATE__NAME:
3115                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3116                 break;
3117         case PERF_EVENT_UPDATE__CPUS:
3118                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3119                 ret += fprintf(fp, "... ");
3120
3121                 map = cpu_map__new_data(&ev_cpus->cpus);
3122                 if (map)
3123                         ret += cpu_map__fprintf(map, fp);
3124                 else
3125                         ret += fprintf(fp, "failed to get cpus\n");
3126                 break;
3127         default:
3128                 ret += fprintf(fp, "... unknown type\n");
3129                 break;
3130         }
3131
3132         return ret;
3133 }
3134
3135 int perf_event__synthesize_attrs(struct perf_tool *tool,
3136                                    struct perf_session *session,
3137                                    perf_event__handler_t process)
3138 {
3139         struct perf_evsel *evsel;
3140         int err = 0;
3141
3142         evlist__for_each_entry(session->evlist, evsel) {
3143                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3144                                                   evsel->id, process);
3145                 if (err) {
3146                         pr_debug("failed to create perf header attribute\n");
3147                         return err;
3148                 }
3149         }
3150
3151         return err;
3152 }
3153
3154 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3155                              union perf_event *event,
3156                              struct perf_evlist **pevlist)
3157 {
3158         u32 i, ids, n_ids;
3159         struct perf_evsel *evsel;
3160         struct perf_evlist *evlist = *pevlist;
3161
3162         if (evlist == NULL) {
3163                 *pevlist = evlist = perf_evlist__new();
3164                 if (evlist == NULL)
3165                         return -ENOMEM;
3166         }
3167
3168         evsel = perf_evsel__new(&event->attr.attr);
3169         if (evsel == NULL)
3170                 return -ENOMEM;
3171
3172         perf_evlist__add(evlist, evsel);
3173
3174         ids = event->header.size;
3175         ids -= (void *)&event->attr.id - (void *)event;
3176         n_ids = ids / sizeof(u64);
3177         /*
3178          * We don't have the cpu and thread maps in the header, so
3179          * for allocating the perf_sample_id table we fake 1 cpu and
3180          * n_ids threads.
3181          */
3182         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3183                 return -ENOMEM;
3184
3185         for (i = 0; i < n_ids; i++) {
3186                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3187         }
3188
3189         symbol_conf.nr_events = evlist->nr_entries;
3190
3191         return 0;
3192 }
3193
3194 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3195                                      union perf_event *event,
3196                                      struct perf_evlist **pevlist)
3197 {
3198         struct event_update_event *ev = &event->event_update;
3199         struct event_update_event_scale *ev_scale;
3200         struct event_update_event_cpus *ev_cpus;
3201         struct perf_evlist *evlist;
3202         struct perf_evsel *evsel;
3203         struct cpu_map *map;
3204
3205         if (!pevlist || *pevlist == NULL)
3206                 return -EINVAL;
3207
3208         evlist = *pevlist;
3209
3210         evsel = perf_evlist__id2evsel(evlist, ev->id);
3211         if (evsel == NULL)
3212                 return -EINVAL;
3213
3214         switch (ev->type) {
3215         case PERF_EVENT_UPDATE__UNIT:
3216                 evsel->unit = strdup(ev->data);
3217                 break;
3218         case PERF_EVENT_UPDATE__NAME:
3219                 evsel->name = strdup(ev->data);
3220                 break;
3221         case PERF_EVENT_UPDATE__SCALE:
3222                 ev_scale = (struct event_update_event_scale *) ev->data;
3223                 evsel->scale = ev_scale->scale;
3224                 break;
3225         case PERF_EVENT_UPDATE__CPUS:
3226                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3227
3228                 map = cpu_map__new_data(&ev_cpus->cpus);
3229                 if (map)
3230                         evsel->own_cpus = map;
3231                 else
3232                         pr_err("failed to get event_update cpus\n");
                break;
3233         default:
3234                 break;
3235         }
3236
3237         return 0;
3238 }
3239
3240 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3241                                         struct perf_evlist *evlist,
3242                                         perf_event__handler_t process)
3243 {
3244         union perf_event ev;
3245         struct tracing_data *tdata;
3246         ssize_t size = 0, aligned_size = 0, padding;
3247         int err __maybe_unused = 0;
3248
3249         /*
3250          * We are going to store the size of the data followed
3251          * by the data contents. Since the fd descriptor is a pipe,
3252          * we cannot seek back to store the size of the data once
3253          * we know it. Instead we:
3254          *
3255          * - write the tracing data to the temp file
3256          * - get/write the data size to pipe
3257          * - write the tracing data from the temp file
3258          *   to the pipe
3259          */
3260         tdata = tracing_data_get(&evlist->entries, fd, true);
3261         if (!tdata)
3262                 return -1;
3263
3264         memset(&ev, 0, sizeof(ev));
3265
3266         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3267         size = tdata->size;
3268         aligned_size = PERF_ALIGN(size, sizeof(u64));
3269         padding = aligned_size - size;
3270         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3271         ev.tracing_data.size = aligned_size;
3272
3273         process(tool, &ev, NULL, NULL);
3274
3275         /*
3276          * The put function will copy all the tracing data
3277          * stored in temp file to the pipe.
3278          */
3279         tracing_data_put(tdata);
3280
3281         write_padded(fd, NULL, 0, padding);
3282
3283         return aligned_size;
3284 }
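
/*
 * Editor's note (illustration, not in the original source): what goes down
 * the pipe above is a PERF_RECORD_HEADER_TRACING_DATA event whose .size field
 * carries the u64-aligned length of the tracing data, followed by the raw
 * tracing data streamed from the temp file by tracing_data_put(), followed by
 * write_padded() zero bytes up to that aligned length;
 * perf_event__process_tracing_data() below consumes exactly this layout.
 */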
3285
3286 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3287                                      union perf_event *event,
3288                                      struct perf_session *session)
3289 {
3290         ssize_t size_read, padding, size = event->tracing_data.size;
3291         int fd = perf_data_file__fd(session->file);
3292         off_t offset = lseek(fd, 0, SEEK_CUR);
3293         char buf[BUFSIZ];
3294
3295         /* setup for reading amidst mmap */
3296         lseek(fd, offset + sizeof(struct tracing_data_event),
3297               SEEK_SET);
3298
3299         size_read = trace_report(fd, &session->tevent,
3300                                  session->repipe);
3301         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3302
3303         if (readn(fd, buf, padding) < 0) {
3304                 pr_err("%s: reading input file", __func__);
3305                 return -1;
3306         }
3307         if (session->repipe) {
3308                 int retw = write(STDOUT_FILENO, buf, padding);
3309                 if (retw <= 0 || retw != padding) {
3310                         pr_err("%s: repiping tracing data padding", __func__);
3311                         return -1;
3312                 }
3313         }
3314
3315         if (size_read + padding != size) {
3316                 pr_err("%s: tracing data size mismatch", __func__);
3317                 return -1;
3318         }
3319
3320         perf_evlist__prepare_tracepoint_events(session->evlist,
3321                                                session->tevent.pevent);
3322
3323         return size_read + padding;
3324 }
3325
3326 int perf_event__synthesize_build_id(struct perf_tool *tool,
3327                                     struct dso *pos, u16 misc,
3328                                     perf_event__handler_t process,
3329                                     struct machine *machine)
3330 {
3331         union perf_event ev;
3332         size_t len;
3333         int err = 0;
3334
3335         if (!pos->hit)
3336                 return err;
3337
3338         memset(&ev, 0, sizeof(ev));
3339
3340         len = pos->long_name_len + 1;
3341         len = PERF_ALIGN(len, NAME_ALIGN);
3342         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3343         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3344         ev.build_id.header.misc = misc;
3345         ev.build_id.pid = machine->pid;
3346         ev.build_id.header.size = sizeof(ev.build_id) + len;
3347         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3348
3349         err = process(tool, &ev, NULL, machine);
3350
3351         return err;
3352 }
3353
3354 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3355                                  union perf_event *event,
3356                                  struct perf_session *session)
3357 {
3358         __event_process_build_id(&event->build_id,
3359                                  event->build_id.filename,
3360                                  session);
3361         return 0;
3362 }