tools/perf/util/header.c
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26 #include <api/fs/fs.h>
27 #include "asm/bug.h"
28
29 /*
30  * magic2 = "PERFILE2"
31  * must be a numerical value so that the endianness of the
32  * recording machine determines the memory layout. That way we are
33  * able to detect the endianness when reading the perf.data file
34  * back.
35  *
36  * We also check for the legacy (PERFFILE) format.
37  */
38 static const char *__perf_magic1 = "PERFFILE";
39 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
40 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
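/*
 * Note: __perf_magic2 is "PERFILE2" as laid out in memory on a
 * little-endian machine and read back as a u64; __perf_magic2_sw is its
 * byte-swapped image. A reader can therefore tell, roughly, whether it
 * needs to byte-swap the rest of the header:
 *
 *	u64 magic;
 *
 *	if (readn(fd, &magic, sizeof(magic)) != sizeof(magic))
 *		return -1;
 *	if (magic == __perf_magic2)
 *		needs_swap = false;
 *	else if (magic == __perf_magic2_sw)
 *		needs_swap = true;
 *	else
 *		... legacy PERFFILE or not a perf.data file ...
 *
 * (illustrative sketch only; the actual check is done when the header is
 * read back later in this file)
 */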
41
42 #define PERF_MAGIC      __perf_magic2
43
44 struct perf_file_attr {
45         struct perf_event_attr  attr;
46         struct perf_file_section        ids;
47 };
48
49 void perf_header__set_feat(struct perf_header *header, int feat)
50 {
51         set_bit(feat, header->adds_features);
52 }
53
54 void perf_header__clear_feat(struct perf_header *header, int feat)
55 {
56         clear_bit(feat, header->adds_features);
57 }
58
59 bool perf_header__has_feat(const struct perf_header *header, int feat)
60 {
61         return test_bit(feat, header->adds_features);
62 }
63
64 static int do_write(int fd, const void *buf, size_t size)
65 {
66         while (size) {
67                 int ret = write(fd, buf, size);
68
69                 if (ret < 0)
70                         return -errno;
71
72                 size -= ret;
73                 buf += ret;
74         }
75
76         return 0;
77 }
78
79 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
80 {
81         static const char zero_buf[NAME_ALIGN];
82         int err = do_write(fd, bf, count);
83
84         if (!err)
85                 err = do_write(fd, zero_buf, count_aligned - count);
86
87         return err;
88 }
89
90 #define string_size(str)                                                \
91         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
92
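/*
 * A sketch of the on-file string encoding used throughout this header:
 * strings are stored as a u32 length (strlen() + 1 rounded up to
 * NAME_ALIGN) followed by the string itself, zero padded up to that
 * length. E.g. with NAME_ALIGN == 64, "x86_64" is written as the u32 64
 * followed by "x86_64\0" and 57 bytes of padding. do_read_string()
 * below relies on that zero padding.
 */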
93 static int do_write_string(int fd, const char *str)
94 {
95         u32 len, olen;
96         int ret;
97
98         olen = strlen(str) + 1;
99         len = PERF_ALIGN(olen, NAME_ALIGN);
100
101         /* write len, incl. \0 */
102         ret = do_write(fd, &len, sizeof(len));
103         if (ret < 0)
104                 return ret;
105
106         return write_padded(fd, str, olen, len);
107 }
108
109 static char *do_read_string(int fd, struct perf_header *ph)
110 {
111         ssize_t sz, ret;
112         u32 len;
113         char *buf;
114
115         sz = readn(fd, &len, sizeof(len));
116         if (sz < (ssize_t)sizeof(len))
117                 return NULL;
118
119         if (ph->needs_swap)
120                 len = bswap_32(len);
121
122         buf = malloc(len);
123         if (!buf)
124                 return NULL;
125
126         ret = readn(fd, buf, len);
127         if (ret == (ssize_t)len) {
128                 /*
129                  * strings are padded by zeroes
130                  * thus the actual strlen of buf
131                  * may be less than len
132                  */
133                 return buf;
134         }
135
136         free(buf);
137         return NULL;
138 }
139
140 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
141                             struct perf_evlist *evlist)
142 {
143         return read_tracing_data(fd, &evlist->entries);
144 }
145
146
147 static int write_build_id(int fd, struct perf_header *h,
148                           struct perf_evlist *evlist __maybe_unused)
149 {
150         struct perf_session *session;
151         int err;
152
153         session = container_of(h, struct perf_session, header);
154
155         if (!perf_session__read_build_ids(session, true))
156                 return -1;
157
158         err = perf_session__write_buildid_table(session, fd);
159         if (err < 0) {
160                 pr_debug("failed to write buildid table\n");
161                 return err;
162         }
163         perf_session__cache_build_ids(session);
164
165         return 0;
166 }
167
168 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
169                           struct perf_evlist *evlist __maybe_unused)
170 {
171         struct utsname uts;
172         int ret;
173
174         ret = uname(&uts);
175         if (ret < 0)
176                 return -1;
177
178         return do_write_string(fd, uts.nodename);
179 }
180
181 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
182                            struct perf_evlist *evlist __maybe_unused)
183 {
184         struct utsname uts;
185         int ret;
186
187         ret = uname(&uts);
188         if (ret < 0)
189                 return -1;
190
191         return do_write_string(fd, uts.release);
192 }
193
194 static int write_arch(int fd, struct perf_header *h __maybe_unused,
195                       struct perf_evlist *evlist __maybe_unused)
196 {
197         struct utsname uts;
198         int ret;
199
200         ret = uname(&uts);
201         if (ret < 0)
202                 return -1;
203
204         return do_write_string(fd, uts.machine);
205 }
206
207 static int write_version(int fd, struct perf_header *h __maybe_unused,
208                          struct perf_evlist *evlist __maybe_unused)
209 {
210         return do_write_string(fd, perf_version_string);
211 }
212
213 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
214 {
215         FILE *file;
216         char *buf = NULL;
217         char *s, *p;
218         const char *search = cpuinfo_proc;
219         size_t len = 0;
220         int ret = -1;
221
222         if (!search)
223                 return -1;
224
225         file = fopen("/proc/cpuinfo", "r");
226         if (!file)
227                 return -1;
228
229         while (getline(&buf, &len, file) > 0) {
230                 ret = strncmp(buf, search, strlen(search));
231                 if (!ret)
232                         break;
233         }
234
235         if (ret) {
236                 ret = -1;
237                 goto done;
238         }
239
240         s = buf;
241
242         p = strchr(buf, ':');
243         if (p && *(p+1) == ' ' && *(p+2))
244                 s = p + 2;
245         p = strchr(s, '\n');
246         if (p)
247                 *p = '\0';
248
249         /* squash extra space characters (branding string) */
250         p = s;
251         while (*p) {
252                 if (isspace(*p)) {
253                         char *r = p + 1;
254                         char *q = r;
255                         *p = ' ';
256                         while (*q && isspace(*q))
257                                 q++;
258                         if (q != (p+1))
259                                 while ((*r++ = *q++));
260                 }
261                 p++;
262         }
263         ret = do_write_string(fd, s);
264 done:
265         free(buf);
266         fclose(file);
267         return ret;
268 }
269
270 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
271                        struct perf_evlist *evlist __maybe_unused)
272 {
273 #ifndef CPUINFO_PROC
274 #define CPUINFO_PROC {"model name", }
275 #endif
276         const char *cpuinfo_procs[] = CPUINFO_PROC;
277         unsigned int i;
278
279         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
280                 int ret;
281                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
282                 if (ret >= 0)
283                         return ret;
284         }
285         return -1;
286 }
287
288
289 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
290                         struct perf_evlist *evlist __maybe_unused)
291 {
292         long nr;
293         u32 nrc, nra;
294         int ret;
295
296         nr = sysconf(_SC_NPROCESSORS_CONF);
297         if (nr < 0)
298                 return -1;
299
300         nrc = (u32)(nr & UINT_MAX);
301
302         nr = sysconf(_SC_NPROCESSORS_ONLN);
303         if (nr < 0)
304                 return -1;
305
306         nra = (u32)(nr & UINT_MAX);
307
308         ret = do_write(fd, &nrc, sizeof(nrc));
309         if (ret < 0)
310                 return ret;
311
312         return do_write(fd, &nra, sizeof(nra));
313 }
314
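/*
 * File format (a sketch of what write_event_desc() emits and
 * read_event_desc() parses back):
 *
 * struct event_descs {
 *	u32	nr_events;
 *	u32	attr_size;
 *	struct event_desc {
 *		struct perf_event_attr	attr;	(attr_size bytes)
 *		u32	nr_ids;
 *		char	name[];			(do_write_string() encoding)
 *		u64	ids[nr_ids];
 *	}[nr_events];
 * };
 */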
315 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
316                             struct perf_evlist *evlist)
317 {
318         struct perf_evsel *evsel;
319         u32 nre, nri, sz;
320         int ret;
321
322         nre = evlist->nr_entries;
323
324         /*
325          * write number of events
326          */
327         ret = do_write(fd, &nre, sizeof(nre));
328         if (ret < 0)
329                 return ret;
330
331         /*
332          * size of perf_event_attr struct
333          */
334         sz = (u32)sizeof(evsel->attr);
335         ret = do_write(fd, &sz, sizeof(sz));
336         if (ret < 0)
337                 return ret;
338
339         evlist__for_each(evlist, evsel) {
340                 ret = do_write(fd, &evsel->attr, sz);
341                 if (ret < 0)
342                         return ret;
343                 /*
344                  * write the number of unique ids per event;
345                  * there is one id per instance of an event.
346                  *
347                  * Copy into nri to be independent of the
348                  * type of evsel->ids.
349                  */
350                 nri = evsel->ids;
351                 ret = do_write(fd, &nri, sizeof(nri));
352                 if (ret < 0)
353                         return ret;
354
355                 /*
356                  * write event string as passed on cmdline
357                  */
358                 ret = do_write_string(fd, perf_evsel__name(evsel));
359                 if (ret < 0)
360                         return ret;
361                 /*
362                  * write unique ids for this event
363                  */
364                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
365                 if (ret < 0)
366                         return ret;
367         }
368         return 0;
369 }
370
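/*
 * File format (sketch):
 *
 * struct cmdline {
 *	u32	nr;		(nr_cmdline + 1 for the binary path)
 *	char	exe_path[];	(readlink of /proc/<pid>/exe)
 *	char	argv[nr - 1][];
 * };
 *
 * Strings use the do_write_string() encoding.
 */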
371 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
372                          struct perf_evlist *evlist __maybe_unused)
373 {
374         char buf[MAXPATHLEN];
375         char proc[32];
376         u32 n;
377         int i, ret;
378
379         /*
380          * actual path to the perf binary
381          */
382         sprintf(proc, "/proc/%d/exe", getpid());
383         ret = readlink(proc, buf, sizeof(buf));
384         if (ret <= 0)
385                 return -1;
386
387         /* readlink() does not add null termination */
388         buf[ret] = '\0';
389
390         /* account for binary path */
391         n = perf_env.nr_cmdline + 1;
392
393         ret = do_write(fd, &n, sizeof(n));
394         if (ret < 0)
395                 return ret;
396
397         ret = do_write_string(fd, buf);
398         if (ret < 0)
399                 return ret;
400
401         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
402                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
403                 if (ret < 0)
404                         return ret;
405         }
406         return 0;
407 }
408
409 #define CORE_SIB_FMT \
410         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
411 #define THRD_SIB_FMT \
412         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
413
414 struct cpu_topo {
415         u32 cpu_nr;
416         u32 core_sib;
417         u32 thread_sib;
418         char **core_siblings;
419         char **thread_siblings;
420 };
421
422 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
423 {
424         FILE *fp;
425         char filename[MAXPATHLEN];
426         char *buf = NULL, *p;
427         size_t len = 0;
428         ssize_t sret;
429         u32 i = 0;
430         int ret = -1;
431
432         sprintf(filename, CORE_SIB_FMT, cpu);
433         fp = fopen(filename, "r");
434         if (!fp)
435                 goto try_threads;
436
437         sret = getline(&buf, &len, fp);
438         fclose(fp);
439         if (sret <= 0)
440                 goto try_threads;
441
442         p = strchr(buf, '\n');
443         if (p)
444                 *p = '\0';
445
446         for (i = 0; i < tp->core_sib; i++) {
447                 if (!strcmp(buf, tp->core_siblings[i]))
448                         break;
449         }
450         if (i == tp->core_sib) {
451                 tp->core_siblings[i] = buf;
452                 tp->core_sib++;
453                 buf = NULL;
454                 len = 0;
455         }
456         ret = 0;
457
458 try_threads:
459         sprintf(filename, THRD_SIB_FMT, cpu);
460         fp = fopen(filename, "r");
461         if (!fp)
462                 goto done;
463
464         if (getline(&buf, &len, fp) <= 0)
465                 goto done;
466
467         p = strchr(buf, '\n');
468         if (p)
469                 *p = '\0';
470
471         for (i = 0; i < tp->thread_sib; i++) {
472                 if (!strcmp(buf, tp->thread_siblings[i]))
473                         break;
474         }
475         if (i == tp->thread_sib) {
476                 tp->thread_siblings[i] = buf;
477                 tp->thread_sib++;
478                 buf = NULL;
479         }
480         ret = 0;
481 done:
482         if (fp)
483                 fclose(fp);
484         free(buf);
485         return ret;
486 }
487
488 static void free_cpu_topo(struct cpu_topo *tp)
489 {
490         u32 i;
491
492         if (!tp)
493                 return;
494
495         for (i = 0 ; i < tp->core_sib; i++)
496                 zfree(&tp->core_siblings[i]);
497
498         for (i = 0 ; i < tp->thread_sib; i++)
499                 zfree(&tp->thread_siblings[i]);
500
501         free(tp);
502 }
503
504 static struct cpu_topo *build_cpu_topology(void)
505 {
506         struct cpu_topo *tp;
507         void *addr;
508         u32 nr, i;
509         size_t sz;
510         long ncpus;
511         int ret = -1;
512
513         ncpus = sysconf(_SC_NPROCESSORS_CONF);
514         if (ncpus < 0)
515                 return NULL;
516
517         nr = (u32)(ncpus & UINT_MAX);
518
519         sz = nr * sizeof(char *);
520
521         addr = calloc(1, sizeof(*tp) + 2 * sz);
522         if (!addr)
523                 return NULL;
524
525         tp = addr;
526         tp->cpu_nr = nr;
527         addr += sizeof(*tp);
528         tp->core_siblings = addr;
529         addr += sz;
530         tp->thread_siblings = addr;
531
532         for (i = 0; i < nr; i++) {
533                 ret = build_cpu_topo(tp, i);
534                 if (ret < 0)
535                         break;
536         }
537         if (ret) {
538                 free_cpu_topo(tp);
539                 tp = NULL;
540         }
541         return tp;
542 }
543
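/*
 * File format (sketch of what write_cpu_topology() emits):
 *
 * struct cpu_topology {
 *	u32	core_sib;
 *	char	core_siblings[core_sib][];
 *	u32	thread_sib;
 *	char	thread_siblings[thread_sib][];
 *	struct {
 *		u32	core_id;
 *		u32	socket_id;
 *	} cpu[nr_cpus_avail];		(appended by newer perf only)
 * };
 */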
544 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
545                           struct perf_evlist *evlist __maybe_unused)
546 {
547         struct cpu_topo *tp;
548         u32 i;
549         int ret, j;
550
551         tp = build_cpu_topology();
552         if (!tp)
553                 return -1;
554
555         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
556         if (ret < 0)
557                 goto done;
558
559         for (i = 0; i < tp->core_sib; i++) {
560                 ret = do_write_string(fd, tp->core_siblings[i]);
561                 if (ret < 0)
562                         goto done;
563         }
564         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
565         if (ret < 0)
566                 goto done;
567
568         for (i = 0; i < tp->thread_sib; i++) {
569                 ret = do_write_string(fd, tp->thread_siblings[i]);
570                 if (ret < 0)
571                         break;
572         }
573
574         ret = perf_env__read_cpu_topology_map(&perf_env);
575         if (ret < 0)
576                 goto done;
577
578         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
579                 ret = do_write(fd, &perf_env.cpu[j].core_id,
580                                sizeof(perf_env.cpu[j].core_id));
581                 if (ret < 0)
582                         return ret;
583                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
584                                sizeof(perf_env.cpu[j].socket_id));
585                 if (ret < 0)
586                         return ret;
587         }
588 done:
589         free_cpu_topo(tp);
590         return ret;
591 }
592
593
594
595 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
596                           struct perf_evlist *evlist __maybe_unused)
597 {
598         char *buf = NULL;
599         FILE *fp;
600         size_t len = 0;
601         int ret = -1, n;
602         uint64_t mem;
603
604         fp = fopen("/proc/meminfo", "r");
605         if (!fp)
606                 return -1;
607
608         while (getline(&buf, &len, fp) > 0) {
609                 ret = strncmp(buf, "MemTotal:", 9);
610                 if (!ret)
611                         break;
612         }
613         if (!ret) {
614                 n = sscanf(buf, "%*s %"PRIu64, &mem);
615                 if (n == 1)
616                         ret = do_write(fd, &mem, sizeof(mem));
617         } else
618                 ret = -1;
619         free(buf);
620         fclose(fp);
621         return ret;
622 }
623
624 static int write_topo_node(int fd, int node)
625 {
626         char str[MAXPATHLEN];
627         char field[32];
628         char *buf = NULL, *p;
629         size_t len = 0;
630         FILE *fp;
631         u64 mem_total, mem_free, mem;
632         int ret = -1;
633
634         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
635         fp = fopen(str, "r");
636         if (!fp)
637                 return -1;
638
639         while (getline(&buf, &len, fp) > 0) {
640                 /* skip over invalid lines */
641                 if (!strchr(buf, ':'))
642                         continue;
643                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
644                         goto done;
645                 if (!strcmp(field, "MemTotal:"))
646                         mem_total = mem;
647                 if (!strcmp(field, "MemFree:"))
648                         mem_free = mem;
649         }
650
651         fclose(fp);
652         fp = NULL;
653
654         ret = do_write(fd, &mem_total, sizeof(u64));
655         if (ret)
656                 goto done;
657
658         ret = do_write(fd, &mem_free, sizeof(u64));
659         if (ret)
660                 goto done;
661
662         ret = -1;
663         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
664
665         fp = fopen(str, "r");
666         if (!fp)
667                 goto done;
668
669         if (getline(&buf, &len, fp) <= 0)
670                 goto done;
671
672         p = strchr(buf, '\n');
673         if (p)
674                 *p = '\0';
675
676         ret = do_write_string(fd, buf);
677 done:
678         free(buf);
679         if (fp)
680                 fclose(fp);
681         return ret;
682 }
683
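/*
 * File format (sketch of what write_numa_topology()/write_topo_node() emit):
 *
 * struct numa_topology {
 *	u32	nr_nodes;
 *	struct numa_node {
 *		u32	node;		(node number)
 *		u64	mem_total;	(kB)
 *		u64	mem_free;	(kB)
 *		char	cpulist[];	(do_write_string() encoding)
 *	}[nr_nodes];
 * };
 */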
684 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
685                           struct perf_evlist *evlist __maybe_unused)
686 {
687         char *buf = NULL;
688         size_t len = 0;
689         FILE *fp;
690         struct cpu_map *node_map = NULL;
691         char *c;
692         u32 nr, i, j;
693         int ret = -1;
694
695         fp = fopen("/sys/devices/system/node/online", "r");
696         if (!fp)
697                 return -1;
698
699         if (getline(&buf, &len, fp) <= 0)
700                 goto done;
701
702         c = strchr(buf, '\n');
703         if (c)
704                 *c = '\0';
705
706         node_map = cpu_map__new(buf);
707         if (!node_map)
708                 goto done;
709
710         nr = (u32)node_map->nr;
711
712         ret = do_write(fd, &nr, sizeof(nr));
713         if (ret < 0)
714                 goto done;
715
716         for (i = 0; i < nr; i++) {
717                 j = (u32)node_map->map[i];
718                 ret = do_write(fd, &j, sizeof(j));
719                 if (ret < 0)
720                         break;
721
722                 ret = write_topo_node(fd, i);
723                 if (ret < 0)
724                         break;
725         }
726 done:
727         free(buf);
728         fclose(fp);
729         cpu_map__put(node_map);
730         return ret;
731 }
732
733 /*
734  * File format:
735  *
736  * struct pmu_mappings {
737  *      u32     pmu_num;
738  *      struct pmu_map {
739  *              u32     type;
740  *              char    name[];
741  *      }[pmu_num];
742  * };
743  */
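/*
 * For instance, on a typical x86 box this might record mappings such as
 * "cpu = 4", "software = 1", "tracepoint = 2" or "breakpoint = 5",
 * i.e. the value each PMU puts into perf_event_attr.type.
 */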
744
745 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
746                               struct perf_evlist *evlist __maybe_unused)
747 {
748         struct perf_pmu *pmu = NULL;
749         off_t offset = lseek(fd, 0, SEEK_CUR);
750         __u32 pmu_num = 0;
751         int ret;
752
753         /* write real pmu_num later */
754         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
755         if (ret < 0)
756                 return ret;
757
758         while ((pmu = perf_pmu__scan(pmu))) {
759                 if (!pmu->name)
760                         continue;
761                 pmu_num++;
762
763                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
764                 if (ret < 0)
765                         return ret;
766
767                 ret = do_write_string(fd, pmu->name);
768                 if (ret < 0)
769                         return ret;
770         }
771
772         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
773                 /* discard all */
774                 lseek(fd, offset, SEEK_SET);
775                 return -1;
776         }
777
778         return 0;
779 }
780
781 /*
782  * File format:
783  *
784  * struct group_descs {
785  *      u32     nr_groups;
786  *      struct group_desc {
787  *              char    name[];
788  *              u32     leader_idx;
789  *              u32     nr_members;
790  *      }[nr_groups];
791  * };
792  */
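/*
 * For example, an evlist created with "-e '{cycles,instructions}'" would
 * produce a single group_desc with nr_members = 2 (the leader counts
 * towards nr_members) and leader_idx set to the index of the cycles evsel.
 */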
793 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
794                             struct perf_evlist *evlist)
795 {
796         u32 nr_groups = evlist->nr_groups;
797         struct perf_evsel *evsel;
798         int ret;
799
800         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
801         if (ret < 0)
802                 return ret;
803
804         evlist__for_each(evlist, evsel) {
805                 if (perf_evsel__is_group_leader(evsel) &&
806                     evsel->nr_members > 1) {
807                         const char *name = evsel->group_name ?: "{anon_group}";
808                         u32 leader_idx = evsel->idx;
809                         u32 nr_members = evsel->nr_members;
810
811                         ret = do_write_string(fd, name);
812                         if (ret < 0)
813                                 return ret;
814
815                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
816                         if (ret < 0)
817                                 return ret;
818
819                         ret = do_write(fd, &nr_members, sizeof(nr_members));
820                         if (ret < 0)
821                                 return ret;
822                 }
823         }
824         return 0;
825 }
826
827 /*
828  * default get_cpuid(): nothing gets recorded
829  * actual implementation must be in arch/$(ARCH)/util/header.c
830  */
831 int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused,
832                                      size_t sz __maybe_unused)
833 {
834         return -1;
835 }
836
837 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
838                        struct perf_evlist *evlist __maybe_unused)
839 {
840         char buffer[64];
841         int ret;
842
843         ret = get_cpuid(buffer, sizeof(buffer));
844         if (!ret)
845                 goto write_it;
846
847         return -1;
848 write_it:
849         return do_write_string(fd, buffer);
850 }
851
852 static int write_branch_stack(int fd __maybe_unused,
853                               struct perf_header *h __maybe_unused,
854                        struct perf_evlist *evlist __maybe_unused)
855 {
856         return 0;
857 }
858
859 static int write_auxtrace(int fd, struct perf_header *h,
860                           struct perf_evlist *evlist __maybe_unused)
861 {
862         struct perf_session *session;
863         int err;
864
865         session = container_of(h, struct perf_session, header);
866
867         err = auxtrace_index__write(fd, &session->auxtrace_index);
868         if (err < 0)
869                 pr_err("Failed to write auxtrace index\n");
870         return err;
871 }
872
873 static int cpu_cache_level__sort(const void *a, const void *b)
874 {
875         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
876         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
877
878         return cache_a->level - cache_b->level;
879 }
880
881 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
882 {
883         if (a->level != b->level)
884                 return false;
885
886         if (a->line_size != b->line_size)
887                 return false;
888
889         if (a->sets != b->sets)
890                 return false;
891
892         if (a->ways != b->ways)
893                 return false;
894
895         if (strcmp(a->type, b->type))
896                 return false;
897
898         if (strcmp(a->size, b->size))
899                 return false;
900
901         if (strcmp(a->map, b->map))
902                 return false;
903
904         return true;
905 }
906
907 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
908 {
909         char path[PATH_MAX], file[PATH_MAX];
910         struct stat st;
911         size_t len;
912
913         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
914         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
915
916         if (stat(file, &st))
917                 return 1;
918
919         scnprintf(file, PATH_MAX, "%s/level", path);
920         if (sysfs__read_int(file, (int *) &cache->level))
921                 return -1;
922
923         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
924         if (sysfs__read_int(file, (int *) &cache->line_size))
925                 return -1;
926
927         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
928         if (sysfs__read_int(file, (int *) &cache->sets))
929                 return -1;
930
931         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
932         if (sysfs__read_int(file, (int *) &cache->ways))
933                 return -1;
934
935         scnprintf(file, PATH_MAX, "%s/type", path);
936         if (sysfs__read_str(file, &cache->type, &len))
937                 return -1;
938
939         cache->type[len] = 0;
940         cache->type = rtrim(cache->type);
941
942         scnprintf(file, PATH_MAX, "%s/size", path);
943         if (sysfs__read_str(file, &cache->size, &len)) {
944                 free(cache->type);
945                 return -1;
946         }
947
948         cache->size[len] = 0;
949         cache->size = rtrim(cache->size);
950
951         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
952         if (sysfs__read_str(file, &cache->map, &len)) {
953                 free(cache->map);
954                 free(cache->type);
955                 return -1;
956         }
957
958         cache->map[len] = 0;
959         cache->map = rtrim(cache->map);
960         return 0;
961 }
962
963 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
964 {
965         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
966 }
967
968 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
969 {
970         u32 i, cnt = 0;
971         long ncpus;
972         u32 nr, cpu;
973         u16 level;
974
975         ncpus = sysconf(_SC_NPROCESSORS_CONF);
976         if (ncpus < 0)
977                 return -1;
978
979         nr = (u32)(ncpus & UINT_MAX);
980
981         for (cpu = 0; cpu < nr; cpu++) {
982                 for (level = 0; level < 10; level++) {
983                         struct cpu_cache_level c;
984                         int err;
985
986                         err = cpu_cache_level__read(&c, cpu, level);
987                         if (err < 0)
988                                 return err;
989
990                         if (err == 1)
991                                 break;
992
993                         for (i = 0; i < cnt; i++) {
994                                 if (cpu_cache_level__cmp(&c, &caches[i]))
995                                         break;
996                         }
997
998                         if (i == cnt)
999                                 caches[cnt++] = c;
1000                         else
1001                                 cpu_cache_level__free(&c);
1002
1003                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1004                                 goto out;
1005                 }
1006         }
1007  out:
1008         *cntp = cnt;
1009         return 0;
1010 }
1011
1012 #define MAX_CACHES 2000
1013
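/*
 * File format (sketch of what write_cache() emits):
 *
 * struct caches {
 *	u32	version;	(currently 1)
 *	u32	cnt;
 *	struct cache {
 *		u32	level, line_size, sets, ways;
 *		char	type[], size[], map[];	(do_write_string() encoding)
 *	}[cnt];
 * };
 */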
1014 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1015                           struct perf_evlist *evlist __maybe_unused)
1016 {
1017         struct cpu_cache_level caches[MAX_CACHES];
1018         u32 cnt = 0, i, version = 1;
1019         int ret;
1020
1021         ret = build_caches(caches, MAX_CACHES, &cnt);
1022         if (ret)
1023                 goto out;
1024
1025         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1026
1027         ret = do_write(fd, &version, sizeof(u32));
1028         if (ret < 0)
1029                 goto out;
1030
1031         ret = do_write(fd, &cnt, sizeof(u32));
1032         if (ret < 0)
1033                 goto out;
1034
1035         for (i = 0; i < cnt; i++) {
1036                 struct cpu_cache_level *c = &caches[i];
1037
1038                 #define _W(v)                                   \
1039                         ret = do_write(fd, &c->v, sizeof(u32)); \
1040                         if (ret < 0)                            \
1041                                 goto out;
1042
1043                 _W(level)
1044                 _W(line_size)
1045                 _W(sets)
1046                 _W(ways)
1047                 #undef _W
1048
1049                 #define _W(v)                                           \
1050                         ret = do_write_string(fd, (const char *) c->v); \
1051                         if (ret < 0)                                    \
1052                                 goto out;
1053
1054                 _W(type)
1055                 _W(size)
1056                 _W(map)
1057                 #undef _W
1058         }
1059
1060 out:
1061         for (i = 0; i < cnt; i++)
1062                 cpu_cache_level__free(&caches[i]);
1063         return ret;
1064 }
1065
1066 static int write_stat(int fd __maybe_unused,
1067                       struct perf_header *h __maybe_unused,
1068                       struct perf_evlist *evlist __maybe_unused)
1069 {
1070         return 0;
1071 }
1072
1073 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1074                            FILE *fp)
1075 {
1076         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1077 }
1078
1079 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1080                             FILE *fp)
1081 {
1082         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1083 }
1084
1085 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1086 {
1087         fprintf(fp, "# arch : %s\n", ph->env.arch);
1088 }
1089
1090 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1091                           FILE *fp)
1092 {
1093         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1094 }
1095
1096 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1097                          FILE *fp)
1098 {
1099         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1100         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1101 }
1102
1103 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1104                           FILE *fp)
1105 {
1106         fprintf(fp, "# perf version : %s\n", ph->env.version);
1107 }
1108
1109 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1110                           FILE *fp)
1111 {
1112         int nr, i;
1113
1114         nr = ph->env.nr_cmdline;
1115
1116         fprintf(fp, "# cmdline : ");
1117
1118         for (i = 0; i < nr; i++)
1119                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1120         fputc('\n', fp);
1121 }
1122
1123 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1124                                FILE *fp)
1125 {
1126         int nr, i;
1127         char *str;
1128         int cpu_nr = ph->env.nr_cpus_online;
1129
1130         nr = ph->env.nr_sibling_cores;
1131         str = ph->env.sibling_cores;
1132
1133         for (i = 0; i < nr; i++) {
1134                 fprintf(fp, "# sibling cores   : %s\n", str);
1135                 str += strlen(str) + 1;
1136         }
1137
1138         nr = ph->env.nr_sibling_threads;
1139         str = ph->env.sibling_threads;
1140
1141         for (i = 0; i < nr; i++) {
1142                 fprintf(fp, "# sibling threads : %s\n", str);
1143                 str += strlen(str) + 1;
1144         }
1145
1146         if (ph->env.cpu != NULL) {
1147                 for (i = 0; i < cpu_nr; i++)
1148                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1149                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1150         } else
1151                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1152 }
1153
1154 static void free_event_desc(struct perf_evsel *events)
1155 {
1156         struct perf_evsel *evsel;
1157
1158         if (!events)
1159                 return;
1160
1161         for (evsel = events; evsel->attr.size; evsel++) {
1162                 zfree(&evsel->name);
1163                 zfree(&evsel->id);
1164         }
1165
1166         free(events);
1167 }
1168
1169 static struct perf_evsel *
1170 read_event_desc(struct perf_header *ph, int fd)
1171 {
1172         struct perf_evsel *evsel, *events = NULL;
1173         u64 *id;
1174         void *buf = NULL;
1175         u32 nre, sz, nr, i, j;
1176         ssize_t ret;
1177         size_t msz;
1178
1179         /* number of events */
1180         ret = readn(fd, &nre, sizeof(nre));
1181         if (ret != (ssize_t)sizeof(nre))
1182                 goto error;
1183
1184         if (ph->needs_swap)
1185                 nre = bswap_32(nre);
1186
1187         ret = readn(fd, &sz, sizeof(sz));
1188         if (ret != (ssize_t)sizeof(sz))
1189                 goto error;
1190
1191         if (ph->needs_swap)
1192                 sz = bswap_32(sz);
1193
1194         /* buffer to hold the on-file attr struct */
1195         buf = malloc(sz);
1196         if (!buf)
1197                 goto error;
1198
1199         /* the last event terminates with evsel->attr.size == 0: */
1200         events = calloc(nre + 1, sizeof(*events));
1201         if (!events)
1202                 goto error;
1203
1204         msz = sizeof(evsel->attr);
1205         if (sz < msz)
1206                 msz = sz;
1207
1208         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1209                 evsel->idx = i;
1210
1211                 /*
1212                  * must read entire on-file attr struct to
1213                  * sync up with layout.
1214                  */
1215                 ret = readn(fd, buf, sz);
1216                 if (ret != (ssize_t)sz)
1217                         goto error;
1218
1219                 if (ph->needs_swap)
1220                         perf_event__attr_swap(buf);
1221
1222                 memcpy(&evsel->attr, buf, msz);
1223
1224                 ret = readn(fd, &nr, sizeof(nr));
1225                 if (ret != (ssize_t)sizeof(nr))
1226                         goto error;
1227
1228                 if (ph->needs_swap) {
1229                         nr = bswap_32(nr);
1230                         evsel->needs_swap = true;
1231                 }
1232
1233                 evsel->name = do_read_string(fd, ph);
1234
1235                 if (!nr)
1236                         continue;
1237
1238                 id = calloc(nr, sizeof(*id));
1239                 if (!id)
1240                         goto error;
1241                 evsel->ids = nr;
1242                 evsel->id = id;
1243
1244                 for (j = 0 ; j < nr; j++) {
1245                         ret = readn(fd, id, sizeof(*id));
1246                         if (ret != (ssize_t)sizeof(*id))
1247                                 goto error;
1248                         if (ph->needs_swap)
1249                                 *id = bswap_64(*id);
1250                         id++;
1251                 }
1252         }
1253 out:
1254         free(buf);
1255         return events;
1256 error:
1257         free_event_desc(events);
1258         events = NULL;
1259         goto out;
1260 }
1261
1262 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1263                                 void *priv __attribute__((unused)))
1264 {
1265         return fprintf(fp, ", %s = %s", name, val);
1266 }
1267
1268 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1269 {
1270         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1271         u32 j;
1272         u64 *id;
1273
1274         if (!events) {
1275                 fprintf(fp, "# event desc: not available or unable to read\n");
1276                 return;
1277         }
1278
1279         for (evsel = events; evsel->attr.size; evsel++) {
1280                 fprintf(fp, "# event : name = %s, ", evsel->name);
1281
1282                 if (evsel->ids) {
1283                         fprintf(fp, ", id = {");
1284                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1285                                 if (j)
1286                                         fputc(',', fp);
1287                                 fprintf(fp, " %"PRIu64, *id);
1288                         }
1289                         fprintf(fp, " }");
1290                 }
1291
1292                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1293
1294                 fputc('\n', fp);
1295         }
1296
1297         free_event_desc(events);
1298 }
1299
1300 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1301                             FILE *fp)
1302 {
1303         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1304 }
1305
1306 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1307                                 FILE *fp)
1308 {
1309         u32 nr, c, i;
1310         char *str, *tmp;
1311         uint64_t mem_total, mem_free;
1312
1313         /* nr nodes */
1314         nr = ph->env.nr_numa_nodes;
1315         str = ph->env.numa_nodes;
1316
1317         for (i = 0; i < nr; i++) {
1318                 /* node number */
1319                 c = strtoul(str, &tmp, 0);
1320                 if (*tmp != ':')
1321                         goto error;
1322
1323                 str = tmp + 1;
1324                 mem_total = strtoull(str, &tmp, 0);
1325                 if (*tmp != ':')
1326                         goto error;
1327
1328                 str = tmp + 1;
1329                 mem_free = strtoull(str, &tmp, 0);
1330                 if (*tmp != ':')
1331                         goto error;
1332
1333                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1334                             " free = %"PRIu64" kB\n",
1335                         c, mem_total, mem_free);
1336
1337                 str = tmp + 1;
1338                 fprintf(fp, "# node%u cpu list : %s\n", c, str);
1339
1340                 str += strlen(str) + 1;
1341         }
1342         return;
1343 error:
1344         fprintf(fp, "# numa topology : not available\n");
1345 }
1346
1347 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1348 {
1349         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1350 }
1351
1352 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1353                                int fd __maybe_unused, FILE *fp)
1354 {
1355         fprintf(fp, "# contains samples with branch stack\n");
1356 }
1357
1358 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1359                            int fd __maybe_unused, FILE *fp)
1360 {
1361         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1362 }
1363
1364 static void print_stat(struct perf_header *ph __maybe_unused,
1365                        int fd __maybe_unused, FILE *fp)
1366 {
1367         fprintf(fp, "# contains stat data\n");
1368 }
1369
1370 static void print_cache(struct perf_header *ph, int fd __maybe_unused,
1371                         FILE *fp)
1372 {
1373         int i;
1374
1375         fprintf(fp, "# CPU cache info:\n");
1376         for (i = 0; i < ph->env.caches_cnt; i++) {
1377                 fprintf(fp, "#  ");
1378                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1379         }
1380 }
1381
1382 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1383                                FILE *fp)
1384 {
1385         const char *delimiter = "# pmu mappings: ";
1386         char *str, *tmp;
1387         u32 pmu_num;
1388         u32 type;
1389
1390         pmu_num = ph->env.nr_pmu_mappings;
1391         if (!pmu_num) {
1392                 fprintf(fp, "# pmu mappings: not available\n");
1393                 return;
1394         }
1395
1396         str = ph->env.pmu_mappings;
1397
1398         while (pmu_num) {
1399                 type = strtoul(str, &tmp, 0);
1400                 if (*tmp != ':')
1401                         goto error;
1402
1403                 str = tmp + 1;
1404                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1405
1406                 delimiter = ", ";
1407                 str += strlen(str) + 1;
1408                 pmu_num--;
1409         }
1410
1411         fprintf(fp, "\n");
1412
1413         if (!pmu_num)
1414                 return;
1415 error:
1416         fprintf(fp, "# pmu mappings: unable to read\n");
1417 }
1418
1419 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1420                              FILE *fp)
1421 {
1422         struct perf_session *session;
1423         struct perf_evsel *evsel;
1424         u32 nr = 0;
1425
1426         session = container_of(ph, struct perf_session, header);
1427
1428         evlist__for_each(session->evlist, evsel) {
1429                 if (perf_evsel__is_group_leader(evsel) &&
1430                     evsel->nr_members > 1) {
1431                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1432                                 perf_evsel__name(evsel));
1433
1434                         nr = evsel->nr_members - 1;
1435                 } else if (nr) {
1436                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1437
1438                         if (--nr == 0)
1439                                 fprintf(fp, "}\n");
1440                 }
1441         }
1442 }
1443
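/*
 * Each record in the build id table is a struct build_id_event: a
 * perf_event_header, the pid identifying the machine it belongs to, the
 * build id bytes and a zero padded filename whose length is implied by
 * header.size. The older, pre-a1645ce1 layout without the pid field is
 * handled by perf_header__read_build_ids_abi_quirk() below.
 */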
1444 static int __event_process_build_id(struct build_id_event *bev,
1445                                     char *filename,
1446                                     struct perf_session *session)
1447 {
1448         int err = -1;
1449         struct machine *machine;
1450         u16 cpumode;
1451         struct dso *dso;
1452         enum dso_kernel_type dso_type;
1453
1454         machine = perf_session__findnew_machine(session, bev->pid);
1455         if (!machine)
1456                 goto out;
1457
1458         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1459
1460         switch (cpumode) {
1461         case PERF_RECORD_MISC_KERNEL:
1462                 dso_type = DSO_TYPE_KERNEL;
1463                 break;
1464         case PERF_RECORD_MISC_GUEST_KERNEL:
1465                 dso_type = DSO_TYPE_GUEST_KERNEL;
1466                 break;
1467         case PERF_RECORD_MISC_USER:
1468         case PERF_RECORD_MISC_GUEST_USER:
1469                 dso_type = DSO_TYPE_USER;
1470                 break;
1471         default:
1472                 goto out;
1473         }
1474
1475         dso = machine__findnew_dso(machine, filename);
1476         if (dso != NULL) {
1477                 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
1478
1479                 dso__set_build_id(dso, &bev->build_id);
1480
1481                 if (!is_kernel_module(filename, cpumode))
1482                         dso->kernel = dso_type;
1483
1484                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1485                                   sbuild_id);
1486                 pr_debug("build id event received for %s: %s\n",
1487                          dso->long_name, sbuild_id);
1488                 dso__put(dso);
1489         }
1490
1491         err = 0;
1492 out:
1493         return err;
1494 }
1495
1496 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1497                                                  int input, u64 offset, u64 size)
1498 {
1499         struct perf_session *session = container_of(header, struct perf_session, header);
1500         struct {
1501                 struct perf_event_header   header;
1502                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1503                 char                       filename[0];
1504         } old_bev;
1505         struct build_id_event bev;
1506         char filename[PATH_MAX];
1507         u64 limit = offset + size;
1508
1509         while (offset < limit) {
1510                 ssize_t len;
1511
1512                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1513                         return -1;
1514
1515                 if (header->needs_swap)
1516                         perf_event_header__bswap(&old_bev.header);
1517
1518                 len = old_bev.header.size - sizeof(old_bev);
1519                 if (readn(input, filename, len) != len)
1520                         return -1;
1521
1522                 bev.header = old_bev.header;
1523
1524                 /*
1525                  * As the pid is the missing value, we need to fill
1526                  * it properly. The header.misc value gives us a nice hint.
1527                  */
1528                 bev.pid = HOST_KERNEL_ID;
1529                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1530                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1531                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1532
1533                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1534                 __event_process_build_id(&bev, filename, session);
1535
1536                 offset += bev.header.size;
1537         }
1538
1539         return 0;
1540 }
1541
1542 static int perf_header__read_build_ids(struct perf_header *header,
1543                                        int input, u64 offset, u64 size)
1544 {
1545         struct perf_session *session = container_of(header, struct perf_session, header);
1546         struct build_id_event bev;
1547         char filename[PATH_MAX];
1548         u64 limit = offset + size, orig_offset = offset;
1549         int err = -1;
1550
1551         while (offset < limit) {
1552                 ssize_t len;
1553
1554                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1555                         goto out;
1556
1557                 if (header->needs_swap)
1558                         perf_event_header__bswap(&bev.header);
1559
1560                 len = bev.header.size - sizeof(bev);
1561                 if (readn(input, filename, len) != len)
1562                         goto out;
1563                 /*
1564                  * The a1645ce1 changeset:
1565                  *
1566                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1567                  *
1568                  * Added a field to struct build_id_event that broke the file
1569                  * format.
1570                  *
1571                  * Since the kernel build-id is the first entry, process the
1572                  * table using the old format if the well known
1573                  * '[kernel.kallsyms]' string for the kernel build-id has the
1574                  * first 4 characters chopped off (where the pid_t sits).
1575                  */
1576                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1577                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1578                                 return -1;
1579                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1580                 }
1581
1582                 __event_process_build_id(&bev, filename, session);
1583
1584                 offset += bev.header.size;
1585         }
1586         err = 0;
1587 out:
1588         return err;
1589 }
1590
1591 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1592                                 struct perf_header *ph __maybe_unused,
1593                                 int fd, void *data)
1594 {
1595         ssize_t ret = trace_report(fd, data, false);
1596         return ret < 0 ? -1 : 0;
1597 }
1598
1599 static int process_build_id(struct perf_file_section *section,
1600                             struct perf_header *ph, int fd,
1601                             void *data __maybe_unused)
1602 {
1603         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1604                 pr_debug("Failed to read buildids, continuing...\n");
1605         return 0;
1606 }
1607
1608 static int process_hostname(struct perf_file_section *section __maybe_unused,
1609                             struct perf_header *ph, int fd,
1610                             void *data __maybe_unused)
1611 {
1612         ph->env.hostname = do_read_string(fd, ph);
1613         return ph->env.hostname ? 0 : -ENOMEM;
1614 }
1615
1616 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1617                              struct perf_header *ph, int fd,
1618                              void *data __maybe_unused)
1619 {
1620         ph->env.os_release = do_read_string(fd, ph);
1621         return ph->env.os_release ? 0 : -ENOMEM;
1622 }
1623
1624 static int process_version(struct perf_file_section *section __maybe_unused,
1625                            struct perf_header *ph, int fd,
1626                            void *data __maybe_unused)
1627 {
1628         ph->env.version = do_read_string(fd, ph);
1629         return ph->env.version ? 0 : -ENOMEM;
1630 }
1631
1632 static int process_arch(struct perf_file_section *section __maybe_unused,
1633                         struct perf_header *ph, int fd,
1634                         void *data __maybe_unused)
1635 {
1636         ph->env.arch = do_read_string(fd, ph);
1637         return ph->env.arch ? 0 : -ENOMEM;
1638 }
1639
1640 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1641                           struct perf_header *ph, int fd,
1642                           void *data __maybe_unused)
1643 {
1644         ssize_t ret;
1645         u32 nr;
1646
1647         ret = readn(fd, &nr, sizeof(nr));
1648         if (ret != sizeof(nr))
1649                 return -1;
1650
1651         if (ph->needs_swap)
1652                 nr = bswap_32(nr);
1653
1654         ph->env.nr_cpus_avail = nr;
1655
1656         ret = readn(fd, &nr, sizeof(nr));
1657         if (ret != sizeof(nr))
1658                 return -1;
1659
1660         if (ph->needs_swap)
1661                 nr = bswap_32(nr);
1662
1663         ph->env.nr_cpus_online = nr;
1664         return 0;
1665 }
1666
1667 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1668                            struct perf_header *ph, int fd,
1669                            void *data __maybe_unused)
1670 {
1671         ph->env.cpu_desc = do_read_string(fd, ph);
1672         return ph->env.cpu_desc ? 0 : -ENOMEM;
1673 }
1674
1675 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1676                          struct perf_header *ph,  int fd,
1677                          void *data __maybe_unused)
1678 {
1679         ph->env.cpuid = do_read_string(fd, ph);
1680         return ph->env.cpuid ? 0 : -ENOMEM;
1681 }
1682
1683 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1684                              struct perf_header *ph, int fd,
1685                              void *data __maybe_unused)
1686 {
1687         uint64_t mem;
1688         ssize_t ret;
1689
1690         ret = readn(fd, &mem, sizeof(mem));
1691         if (ret != sizeof(mem))
1692                 return -1;
1693
1694         if (ph->needs_swap)
1695                 mem = bswap_64(mem);
1696
1697         ph->env.total_mem = mem;
1698         return 0;
1699 }
1700
1701 static struct perf_evsel *
1702 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1703 {
1704         struct perf_evsel *evsel;
1705
1706         evlist__for_each(evlist, evsel) {
1707                 if (evsel->idx == idx)
1708                         return evsel;
1709         }
1710
1711         return NULL;
1712 }
1713
1714 static void
1715 perf_evlist__set_event_name(struct perf_evlist *evlist,
1716                             struct perf_evsel *event)
1717 {
1718         struct perf_evsel *evsel;
1719
1720         if (!event->name)
1721                 return;
1722
1723         evsel = perf_evlist__find_by_index(evlist, event->idx);
1724         if (!evsel)
1725                 return;
1726
1727         if (evsel->name)
1728                 return;
1729
1730         evsel->name = strdup(event->name);
1731 }
1732
1733 static int
1734 process_event_desc(struct perf_file_section *section __maybe_unused,
1735                    struct perf_header *header, int fd,
1736                    void *data __maybe_unused)
1737 {
1738         struct perf_session *session;
1739         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1740
1741         if (!events)
1742                 return 0;
1743
1744         session = container_of(header, struct perf_session, header);
1745         for (evsel = events; evsel->attr.size; evsel++)
1746                 perf_evlist__set_event_name(session->evlist, evsel);
1747
1748         free_event_desc(events);
1749
1750         return 0;
1751 }
1752
1753 static int process_cmdline(struct perf_file_section *section,
1754                            struct perf_header *ph, int fd,
1755                            void *data __maybe_unused)
1756 {
1757         ssize_t ret;
1758         char *str, *cmdline = NULL, **argv = NULL;
1759         u32 nr, i, len = 0;
1760
1761         ret = readn(fd, &nr, sizeof(nr));
1762         if (ret != sizeof(nr))
1763                 return -1;
1764
1765         if (ph->needs_swap)
1766                 nr = bswap_32(nr);
1767
1768         ph->env.nr_cmdline = nr;
1769
1770         cmdline = zalloc(section->size + nr + 1);
1771         if (!cmdline)
1772                 return -1;
1773
1774         argv = zalloc(sizeof(char *) * (nr + 1));
1775         if (!argv)
1776                 goto error;
1777
1778         for (i = 0; i < nr; i++) {
1779                 str = do_read_string(fd, ph);
1780                 if (!str)
1781                         goto error;
1782
1783                 argv[i] = cmdline + len;
1784                 memcpy(argv[i], str, strlen(str) + 1);
1785                 len += strlen(str) + 1;
1786                 free(str);
1787         }
1788         ph->env.cmdline = cmdline;
1789         ph->env.cmdline_argv = (const char **) argv;
1790         return 0;
1791
1792 error:
1793         free(argv);
1794         free(cmdline);
1795         return -1;
1796 }
1797
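/*
 * HEADER_CPU_TOPOLOGY layout as read here: a u32 count of core sibling
 * lists followed by that many strings, then a u32 count of thread
 * sibling lists followed by that many strings.  Newer files append, for
 * every online CPU, a u32 core_id and a u32 socket_id; older files stop
 * after the sibling lists (detected via section->size).
 */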
1798 static int process_cpu_topology(struct perf_file_section *section,
1799                                 struct perf_header *ph, int fd,
1800                                 void *data __maybe_unused)
1801 {
1802         ssize_t ret;
1803         u32 nr, i;
1804         char *str;
1805         struct strbuf sb;
1806         int cpu_nr = ph->env.nr_cpus_online;
1807         u64 size = 0;
1808
1809         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1810         if (!ph->env.cpu)
1811                 return -1;
1812
1813         ret = readn(fd, &nr, sizeof(nr));
1814         if (ret != sizeof(nr))
1815                 goto free_cpu;
1816
1817         if (ph->needs_swap)
1818                 nr = bswap_32(nr);
1819
1820         ph->env.nr_sibling_cores = nr;
1821         size += sizeof(u32);
1822         strbuf_init(&sb, 128);
1823
1824         for (i = 0; i < nr; i++) {
1825                 str = do_read_string(fd, ph);
1826                 if (!str)
1827                         goto error;
1828
1829                 /* include a NULL character at the end */
1830                 strbuf_add(&sb, str, strlen(str) + 1);
1831                 size += string_size(str);
1832                 free(str);
1833         }
1834         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1835
1836         ret = readn(fd, &nr, sizeof(nr));
1837         if (ret != sizeof(nr))
1838                 goto free_cpu;
1839
1840         if (ph->needs_swap)
1841                 nr = bswap_32(nr);
1842
1843         ph->env.nr_sibling_threads = nr;
1844         size += sizeof(u32);
1845
1846         for (i = 0; i < nr; i++) {
1847                 str = do_read_string(fd, ph);
1848                 if (!str)
1849                         goto error;
1850
1851                 /* include a NULL character at the end */
1852                 strbuf_add(&sb, str, strlen(str) + 1);
1853                 size += string_size(str);
1854                 free(str);
1855         }
1856         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1857
1858         /*
1859          * The header may be from an older perf tool,
1860          * which doesn't include core id and socket id information.
1861          */
1862         if (section->size <= size) {
1863                 zfree(&ph->env.cpu);
1864                 return 0;
1865         }
1866
1867         for (i = 0; i < (u32)cpu_nr; i++) {
1868                 ret = readn(fd, &nr, sizeof(nr));
1869                 if (ret != sizeof(nr))
1870                         goto free_cpu;
1871
1872                 if (ph->needs_swap)
1873                         nr = bswap_32(nr);
1874
1875                 if (nr > (u32)cpu_nr) {
1876                         pr_debug("core_id number is too big. "
1877                                  "You may need to upgrade the perf tool.\n");
1878                         goto free_cpu;
1879                 }
1880                 ph->env.cpu[i].core_id = nr;
1881
1882                 ret = readn(fd, &nr, sizeof(nr));
1883                 if (ret != sizeof(nr))
1884                         goto free_cpu;
1885
1886                 if (ph->needs_swap)
1887                         nr = bswap_32(nr);
1888
1889                 if (nr > (u32)cpu_nr) {
1890                         pr_debug("socket_id number is too big. "
1891                                  "You may need to upgrade the perf tool.\n");
1892                         goto free_cpu;
1893                 }
1894
1895                 ph->env.cpu[i].socket_id = nr;
1896         }
1897
1898         return 0;
1899
1900 error:
1901         strbuf_release(&sb);
1902 free_cpu:
1903         zfree(&ph->env.cpu);
1904         return -1;
1905 }
1906
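/*
 * HEADER_NUMA_TOPOLOGY layout as read here: a u32 node count, then per
 * node a u32 node number, u64 total and free memory counters, and a
 * string with the node's CPU list.  The result is flattened into one
 * NUL-separated "node:mem_total:mem_free:cpulist" entry per node.
 */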
1907 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1908                                  struct perf_header *ph, int fd,
1909                                  void *data __maybe_unused)
1910 {
1911         ssize_t ret;
1912         u32 nr, node, i;
1913         char *str;
1914         uint64_t mem_total, mem_free;
1915         struct strbuf sb;
1916
1917         /* nr nodes */
1918         ret = readn(fd, &nr, sizeof(nr));
1919         if (ret != sizeof(nr))
1920                 goto error;
1921
1922         if (ph->needs_swap)
1923                 nr = bswap_32(nr);
1924
1925         ph->env.nr_numa_nodes = nr;
1926         strbuf_init(&sb, 256);
1927
1928         for (i = 0; i < nr; i++) {
1929                 /* node number */
1930                 ret = readn(fd, &node, sizeof(node));
1931                 if (ret != sizeof(node))
1932                         goto error;
1933
1934                 ret = readn(fd, &mem_total, sizeof(u64));
1935                 if (ret != sizeof(u64))
1936                         goto error;
1937
1938                 ret = readn(fd, &mem_free, sizeof(u64));
1939                 if (ret != sizeof(u64))
1940                         goto error;
1941
1942                 if (ph->needs_swap) {
1943                         node = bswap_32(node);
1944                         mem_total = bswap_64(mem_total);
1945                         mem_free = bswap_64(mem_free);
1946                 }
1947
1948                 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
1949                             node, mem_total, mem_free);
1950
1951                 str = do_read_string(fd, ph);
1952                 if (!str)
1953                         goto error;
1954
1955                 /* include a NULL character at the end */
1956                 strbuf_add(&sb, str, strlen(str) + 1);
1957                 free(str);
1958         }
1959         ph->env.numa_nodes = strbuf_detach(&sb, NULL);
1960         return 0;
1961
1962 error:
1963         strbuf_release(&sb);
1964         return -1;
1965 }
1966
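/*
 * HEADER_PMU_MAPPINGS layout as read here: a u32 PMU count, then per
 * PMU a u32 type and its name string.  Entries are flattened into a
 * NUL-separated "type:name" list; the "msr" PMU type is also cached.
 */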
1967 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1968                                 struct perf_header *ph, int fd,
1969                                 void *data __maybe_unused)
1970 {
1971         ssize_t ret;
1972         char *name;
1973         u32 pmu_num;
1974         u32 type;
1975         struct strbuf sb;
1976
1977         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1978         if (ret != sizeof(pmu_num))
1979                 return -1;
1980
1981         if (ph->needs_swap)
1982                 pmu_num = bswap_32(pmu_num);
1983
1984         if (!pmu_num) {
1985                 pr_debug("pmu mappings not available\n");
1986                 return 0;
1987         }
1988
1989         ph->env.nr_pmu_mappings = pmu_num;
1990         strbuf_init(&sb, 128);
1991
1992         while (pmu_num) {
1993                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1994                         goto error;
1995                 if (ph->needs_swap)
1996                         type = bswap_32(type);
1997
1998                 name = do_read_string(fd, ph);
1999                 if (!name)
2000                         goto error;
2001
2002                 strbuf_addf(&sb, "%u:%s", type, name);
2003                 /* include a NULL character at the end */
2004                 strbuf_add(&sb, "", 1);
2005
2006                 if (!strcmp(name, "msr"))
2007                         ph->env.msr_pmu_type = type;
2008
2009                 free(name);
2010                 pmu_num--;
2011         }
2012         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2013         return 0;
2014
2015 error:
2016         strbuf_release(&sb);
2017         return -1;
2018 }
2019
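/*
 * HEADER_GROUP_DESC layout as read here: a u32 group count, then per
 * group a name string, a u32 leader index and a u32 member count.  The
 * leader/member links of the evlist are rebuilt from these descriptors;
 * the placeholder name "{anon_group}" is not copied.
 */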
2020 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2021                               struct perf_header *ph, int fd,
2022                               void *data __maybe_unused)
2023 {
2024         int ret = -1;
2025         u32 i, nr, nr_groups;
2026         struct perf_session *session;
2027         struct perf_evsel *evsel, *leader = NULL;
2028         struct group_desc {
2029                 char *name;
2030                 u32 leader_idx;
2031                 u32 nr_members;
2032         } *desc;
2033
2034         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2035                 return -1;
2036
2037         if (ph->needs_swap)
2038                 nr_groups = bswap_32(nr_groups);
2039
2040         ph->env.nr_groups = nr_groups;
2041         if (!nr_groups) {
2042                 pr_debug("group desc not available\n");
2043                 return 0;
2044         }
2045
2046         desc = calloc(nr_groups, sizeof(*desc));
2047         if (!desc)
2048                 return -1;
2049
2050         for (i = 0; i < nr_groups; i++) {
2051                 desc[i].name = do_read_string(fd, ph);
2052                 if (!desc[i].name)
2053                         goto out_free;
2054
2055                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2056                         goto out_free;
2057
2058                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2059                         goto out_free;
2060
2061                 if (ph->needs_swap) {
2062                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2063                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2064                 }
2065         }
2066
2067         /*
2068          * Rebuild group relationship based on the group_desc
2069          */
2070         session = container_of(ph, struct perf_session, header);
2071         session->evlist->nr_groups = nr_groups;
2072
2073         i = nr = 0;
2074         evlist__for_each(session->evlist, evsel) {
2075                 if (evsel->idx == (int) desc[i].leader_idx) {
2076                         evsel->leader = evsel;
2077                         /* {anon_group} is a dummy name */
2078                         if (strcmp(desc[i].name, "{anon_group}")) {
2079                                 evsel->group_name = desc[i].name;
2080                                 desc[i].name = NULL;
2081                         }
2082                         evsel->nr_members = desc[i].nr_members;
2083
2084                         if (i >= nr_groups || nr > 0) {
2085                                 pr_debug("invalid group desc\n");
2086                                 goto out_free;
2087                         }
2088
2089                         leader = evsel;
2090                         nr = evsel->nr_members - 1;
2091                         i++;
2092                 } else if (nr) {
2093                         /* This is a group member */
2094                         evsel->leader = leader;
2095
2096                         nr--;
2097                 }
2098         }
2099
2100         if (i != nr_groups || nr != 0) {
2101                 pr_debug("invalid group desc\n");
2102                 goto out_free;
2103         }
2104
2105         ret = 0;
2106 out_free:
2107         for (i = 0; i < nr_groups; i++)
2108                 zfree(&desc[i].name);
2109         free(desc);
2110
2111         return ret;
2112 }
2113
2114 static int process_auxtrace(struct perf_file_section *section,
2115                             struct perf_header *ph, int fd,
2116                             void *data __maybe_unused)
2117 {
2118         struct perf_session *session;
2119         int err;
2120
2121         session = container_of(ph, struct perf_session, header);
2122
2123         err = auxtrace_index__process(fd, section->size, session,
2124                                       ph->needs_swap);
2125         if (err < 0)
2126                 pr_err("Failed to process auxtrace index\n");
2127         return err;
2128 }
2129
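/*
 * HEADER_CACHE layout as read here: a u32 version (only 1 is accepted)
 * and a u32 cache count, then per cache level four u32s (level,
 * line_size, sets, ways) followed by three strings (type, size, map).
 */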
2130 static int process_cache(struct perf_file_section *section __maybe_unused,
2131                          struct perf_header *ph, int fd,
2132                          void *data __maybe_unused)
2133 {
2134         struct cpu_cache_level *caches;
2135         u32 cnt, i, version;
2136
2137         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2138                 return -1;
2139
2140         if (ph->needs_swap)
2141                 version = bswap_32(version);
2142
2143         if (version != 1)
2144                 return -1;
2145
2146         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2147                 return -1;
2148
2149         if (ph->needs_swap)
2150                 cnt = bswap_32(cnt);
2151
2152         caches = zalloc(sizeof(*caches) * cnt);
2153         if (!caches)
2154                 return -1;
2155
2156         for (i = 0; i < cnt; i++) {
2157                 struct cpu_cache_level c;
2158
2159                 #define _R(v)                                           \
2160                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2161                                 goto out_free_caches;                   \
2162                         if (ph->needs_swap)                             \
2163                                 c.v = bswap_32(c.v);                    \
2164
2165                 _R(level)
2166                 _R(line_size)
2167                 _R(sets)
2168                 _R(ways)
2169                 #undef _R
2170
2171                 #define _R(v)                           \
2172                         c.v = do_read_string(fd, ph);   \
2173                         if (!c.v)                       \
2174                                 goto out_free_caches;
2175
2176                 _R(type)
2177                 _R(size)
2178                 _R(map)
2179                 #undef _R
2180
2181                 caches[i] = c;
2182         }
2183
2184         ph->env.caches = caches;
2185         ph->env.caches_cnt = cnt;
2186         return 0;
2187 out_free_caches:
2188         free(caches);
2189         return -1;
2190 }
2191
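/*
 * Table of per-feature callbacks: ->write emits the section when the
 * header is written, ->print dumps it via perf_header__fprintf_info(),
 * and ->process parses it when a perf.data file is read.  FEAT_OPA
 * entries have no process callback, FEAT_OPP entries do, and FEAT_OPF
 * entries are full-only, i.e. printed only when -I is given.
 */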
2192 struct feature_ops {
2193         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2194         void (*print)(struct perf_header *h, int fd, FILE *fp);
2195         int (*process)(struct perf_file_section *section,
2196                        struct perf_header *h, int fd, void *data);
2197         const char *name;
2198         bool full_only;
2199 };
2200
2201 #define FEAT_OPA(n, func) \
2202         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2203 #define FEAT_OPP(n, func) \
2204         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2205                 .process = process_##func }
2206 #define FEAT_OPF(n, func) \
2207         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2208                 .process = process_##func, .full_only = true }
2209
2210 /* feature_ops not implemented: */
2211 #define print_tracing_data      NULL
2212 #define print_build_id          NULL
2213
2214 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2215         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2216         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2217         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2218         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2219         FEAT_OPP(HEADER_VERSION,        version),
2220         FEAT_OPP(HEADER_ARCH,           arch),
2221         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2222         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2223         FEAT_OPP(HEADER_CPUID,          cpuid),
2224         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2225         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2226         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2227         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2228         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2229         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2230         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2231         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2232         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2233         FEAT_OPA(HEADER_STAT,           stat),
2234         FEAT_OPF(HEADER_CACHE,          cache),
2235 };
2236
2237 struct header_print_data {
2238         FILE *fp;
2239         bool full; /* extended list of headers */
2240 };
2241
2242 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2243                                            struct perf_header *ph,
2244                                            int feat, int fd, void *data)
2245 {
2246         struct header_print_data *hd = data;
2247
2248         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2249                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2250                                 "%d, continuing...\n", section->offset, feat);
2251                 return 0;
2252         }
2253         if (feat >= HEADER_LAST_FEATURE) {
2254                 pr_warning("unknown feature %d\n", feat);
2255                 return 0;
2256         }
2257         if (!feat_ops[feat].print)
2258                 return 0;
2259
2260         if (!feat_ops[feat].full_only || hd->full)
2261                 feat_ops[feat].print(ph, fd, hd->fp);
2262         else
2263                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2264                         feat_ops[feat].name);
2265
2266         return 0;
2267 }
2268
2269 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2270 {
2271         struct header_print_data hd;
2272         struct perf_header *header = &session->header;
2273         int fd = perf_data_file__fd(session->file);
2274         hd.fp = fp;
2275         hd.full = full;
2276
2277         perf_header__process_sections(header, fd, &hd,
2278                                       perf_file_section__fprintf_info);
2279         return 0;
2280 }
2281
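/*
 * Write a single feature section: record the current file offset in *p,
 * call the feature's ->write callback, then store the section size and
 * advance *p.  On failure the file position is rewound so nothing of the
 * half-written feature remains and the caller clears the feature bit.
 */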
2282 static int do_write_feat(int fd, struct perf_header *h, int type,
2283                          struct perf_file_section **p,
2284                          struct perf_evlist *evlist)
2285 {
2286         int err;
2287         int ret = 0;
2288
2289         if (perf_header__has_feat(h, type)) {
2290                 if (!feat_ops[type].write)
2291                         return -1;
2292
2293                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2294
2295                 err = feat_ops[type].write(fd, h, evlist);
2296                 if (err < 0) {
2297                         pr_debug("failed to write feature %d\n", type);
2298
2299                         /* undo anything written */
2300                         lseek(fd, (*p)->offset, SEEK_SET);
2301
2302                         return -1;
2303                 }
2304                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2305                 (*p)++;
2306         }
2307         return ret;
2308 }
2309
2310 static int perf_header__adds_write(struct perf_header *header,
2311                                    struct perf_evlist *evlist, int fd)
2312 {
2313         int nr_sections;
2314         struct perf_file_section *feat_sec, *p;
2315         int sec_size;
2316         u64 sec_start;
2317         int feat;
2318         int err;
2319
2320         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2321         if (!nr_sections)
2322                 return 0;
2323
2324         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2325         if (feat_sec == NULL)
2326                 return -ENOMEM;
2327
2328         sec_size = sizeof(*feat_sec) * nr_sections;
2329
2330         sec_start = header->feat_offset;
2331         lseek(fd, sec_start + sec_size, SEEK_SET);
2332
2333         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2334                 if (do_write_feat(fd, header, feat, &p, evlist))
2335                         perf_header__clear_feat(header, feat);
2336         }
2337
2338         lseek(fd, sec_start, SEEK_SET);
2339         /*
2340          * may write more than needed due to a dropped feature, but
2341          * this is okay, the reader will skip the missing entries
2342          */
2343         err = do_write(fd, feat_sec, sec_size);
2344         if (err < 0)
2345                 pr_debug("failed to write feature section\n");
2346         free(feat_sec);
2347         return err;
2348 }
2349
2350 int perf_header__write_pipe(int fd)
2351 {
2352         struct perf_pipe_file_header f_header;
2353         int err;
2354
2355         f_header = (struct perf_pipe_file_header){
2356                 .magic     = PERF_MAGIC,
2357                 .size      = sizeof(f_header),
2358         };
2359
2360         err = do_write(fd, &f_header, sizeof(f_header));
2361         if (err < 0) {
2362                 pr_debug("failed to write perf pipe header\n");
2363                 return err;
2364         }
2365
2366         return 0;
2367 }
2368
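/*
 * On-disk layout produced here: the perf_file_header at offset 0
 * (rewritten last), the per-event sample id arrays, the perf_file_attr
 * array, then the data section; when called at exit, the feature section
 * table and feature data are appended after the data section.
 */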
2369 int perf_session__write_header(struct perf_session *session,
2370                                struct perf_evlist *evlist,
2371                                int fd, bool at_exit)
2372 {
2373         struct perf_file_header f_header;
2374         struct perf_file_attr   f_attr;
2375         struct perf_header *header = &session->header;
2376         struct perf_evsel *evsel;
2377         u64 attr_offset;
2378         int err;
2379
2380         lseek(fd, sizeof(f_header), SEEK_SET);
2381
2382         evlist__for_each(session->evlist, evsel) {
2383                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2384                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2385                 if (err < 0) {
2386                         pr_debug("failed to write perf header\n");
2387                         return err;
2388                 }
2389         }
2390
2391         attr_offset = lseek(fd, 0, SEEK_CUR);
2392
2393         evlist__for_each(evlist, evsel) {
2394                 f_attr = (struct perf_file_attr){
2395                         .attr = evsel->attr,
2396                         .ids  = {
2397                                 .offset = evsel->id_offset,
2398                                 .size   = evsel->ids * sizeof(u64),
2399                         }
2400                 };
2401                 err = do_write(fd, &f_attr, sizeof(f_attr));
2402                 if (err < 0) {
2403                         pr_debug("failed to write perf header attribute\n");
2404                         return err;
2405                 }
2406         }
2407
2408         if (!header->data_offset)
2409                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2410         header->feat_offset = header->data_offset + header->data_size;
2411
2412         if (at_exit) {
2413                 err = perf_header__adds_write(header, evlist, fd);
2414                 if (err < 0)
2415                         return err;
2416         }
2417
2418         f_header = (struct perf_file_header){
2419                 .magic     = PERF_MAGIC,
2420                 .size      = sizeof(f_header),
2421                 .attr_size = sizeof(f_attr),
2422                 .attrs = {
2423                         .offset = attr_offset,
2424                         .size   = evlist->nr_entries * sizeof(f_attr),
2425                 },
2426                 .data = {
2427                         .offset = header->data_offset,
2428                         .size   = header->data_size,
2429                 },
2430                 /* event_types is ignored, store zeros */
2431         };
2432
2433         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2434
2435         lseek(fd, 0, SEEK_SET);
2436         err = do_write(fd, &f_header, sizeof(f_header));
2437         if (err < 0) {
2438                 pr_debug("failed to write perf header\n");
2439                 return err;
2440         }
2441         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2442
2443         return 0;
2444 }
2445
2446 static int perf_header__getbuffer64(struct perf_header *header,
2447                                     int fd, void *buf, size_t size)
2448 {
2449         if (readn(fd, buf, size) <= 0)
2450                 return -1;
2451
2452         if (header->needs_swap)
2453                 mem_bswap_64(buf, size);
2454
2455         return 0;
2456 }
2457
2458 int perf_header__process_sections(struct perf_header *header, int fd,
2459                                   void *data,
2460                                   int (*process)(struct perf_file_section *section,
2461                                                  struct perf_header *ph,
2462                                                  int feat, int fd, void *data))
2463 {
2464         struct perf_file_section *feat_sec, *sec;
2465         int nr_sections;
2466         int sec_size;
2467         int feat;
2468         int err;
2469
2470         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2471         if (!nr_sections)
2472                 return 0;
2473
2474         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2475         if (!feat_sec)
2476                 return -1;
2477
2478         sec_size = sizeof(*feat_sec) * nr_sections;
2479
2480         lseek(fd, header->feat_offset, SEEK_SET);
2481
2482         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2483         if (err < 0)
2484                 goto out_free;
2485
2486         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2487                 err = process(sec++, header, feat, fd, data);
2488                 if (err < 0)
2489                         goto out_free;
2490         }
2491         err = 0;
2492 out_free:
2493         free(feat_sec);
2494         return err;
2495 }
2496
2497 static const int attr_file_abi_sizes[] = {
2498         [0] = PERF_ATTR_SIZE_VER0,
2499         [1] = PERF_ATTR_SIZE_VER1,
2500         [2] = PERF_ATTR_SIZE_VER2,
2501         [3] = PERF_ATTR_SIZE_VER3,
2502         [4] = PERF_ATTR_SIZE_VER4,
2503         0,
2504 };
2505
2506 /*
2507  * In the legacy file format the magic number does not encode endianness;
2508  * hdr_sz was used for that instead. But given that hdr_sz can vary with
2509  * the ABI revision, we need to try every known size in both byte orders
2510  * to detect the endianness.
2511  */
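/*
 * Example: an ABI0 file records hdr_sz == PERF_ATTR_SIZE_VER0 +
 * sizeof(struct perf_file_section).  If the value on file only matches
 * one of the known sizes after a bswap_64(), the file was produced on a
 * host of the opposite endianness and needs_swap is set.
 */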
2512 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2513 {
2514         uint64_t ref_size, attr_size;
2515         int i;
2516
2517         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2518                 ref_size = attr_file_abi_sizes[i]
2519                          + sizeof(struct perf_file_section);
2520                 if (hdr_sz != ref_size) {
2521                         attr_size = bswap_64(hdr_sz);
2522                         if (attr_size != ref_size)
2523                                 continue;
2524
2525                         ph->needs_swap = true;
2526                 }
2527                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2528                          i,
2529                          ph->needs_swap);
2530                 return 0;
2531         }
2532         /* could not determine endianness */
2533         return -1;
2534 }
2535
2536 #define PERF_PIPE_HDR_VER0      16
2537
2538 static const size_t attr_pipe_abi_sizes[] = {
2539         [0] = PERF_PIPE_HDR_VER0,
2540         0,
2541 };
2542
2543 /*
2544  * In the legacy pipe format, there is an implicit assumption that the
2545  * endianness of the host recording the samples and of the host parsing them
2546  * is the same. This is not always the case: the pipe output may be
2547  * redirected into a file and analyzed on a different machine, with possibly
2548  * a different endianness and perf_event ABI revision in the perf tool itself.
2549  */
2550 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2551 {
2552         u64 attr_size;
2553         int i;
2554
2555         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2556                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2557                         attr_size = bswap_64(hdr_sz);
2558                         if (attr_size != hdr_sz)
2559                                 continue;
2560
2561                         ph->needs_swap = true;
2562                 }
2563                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2564                 return 0;
2565         }
2566         return -1;
2567 }
2568
2569 bool is_perf_magic(u64 magic)
2570 {
2571         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2572                 || magic == __perf_magic2
2573                 || magic == __perf_magic2_sw)
2574                 return true;
2575
2576         return false;
2577 }
2578
2579 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2580                               bool is_pipe, struct perf_header *ph)
2581 {
2582         int ret;
2583
2584         /* check for legacy format */
2585         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2586         if (ret == 0) {
2587                 ph->version = PERF_HEADER_VERSION_1;
2588                 pr_debug("legacy perf.data format\n");
2589                 if (is_pipe)
2590                         return try_all_pipe_abis(hdr_sz, ph);
2591
2592                 return try_all_file_abis(hdr_sz, ph);
2593         }
2594         /*
2595          * the new magic number serves two purposes:
2596          * - unique number to identify actual perf.data files
2597          * - encode endianness of file
2598          */
2599         ph->version = PERF_HEADER_VERSION_2;
2600
2601         /* check magic number with one endianness */
2602         if (magic == __perf_magic2)
2603                 return 0;
2604
2605         /* check magic number with opposite endianness */
2606         if (magic != __perf_magic2_sw)
2607                 return -1;
2608
2609         ph->needs_swap = true;
2610
2611         return 0;
2612 }
2613
2614 int perf_file_header__read(struct perf_file_header *header,
2615                            struct perf_header *ph, int fd)
2616 {
2617         ssize_t ret;
2618
2619         lseek(fd, 0, SEEK_SET);
2620
2621         ret = readn(fd, header, sizeof(*header));
2622         if (ret <= 0)
2623                 return -1;
2624
2625         if (check_magic_endian(header->magic,
2626                                header->attr_size, false, ph) < 0) {
2627                 pr_debug("magic/endian check failed\n");
2628                 return -1;
2629         }
2630
2631         if (ph->needs_swap) {
2632                 mem_bswap_64(header, offsetof(struct perf_file_header,
2633                              adds_features));
2634         }
2635
2636         if (header->size != sizeof(*header)) {
2637                 /* Support the previous format */
2638                 if (header->size == offsetof(typeof(*header), adds_features))
2639                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2640                 else
2641                         return -1;
2642         } else if (ph->needs_swap) {
2643                 /*
2644                  * feature bitmap is declared as an array of unsigned longs --
2645                  * not good since its size can differ between the host that
2646                  * generated the data file and the host analyzing the file.
2647                  *
2648                  * We need to handle endianness, but we don't know the size of
2649                  * the unsigned long where the file was generated. Take a best
2650                  * guess at determining it: try a 64-bit swap first (i.e., file
2651                  * created on a 64-bit host), and check if the hostname feature
2652                  * bit is set (this feature bit is forced on as of fbe96f2).
2653                  * If the bit is not set, undo the 64-bit swap and try a 32-bit
2654                  * swap. If the hostname bit is still not set (e.g., older data
2655                  * file), punt and fall back to the original behavior --
2656                  * clearing all feature bits and setting buildid.
2657                  */
2658                 mem_bswap_64(&header->adds_features,
2659                             BITS_TO_U64(HEADER_FEAT_BITS));
2660
2661                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2662                         /* unswap as u64 */
2663                         mem_bswap_64(&header->adds_features,
2664                                     BITS_TO_U64(HEADER_FEAT_BITS));
2665
2666                         /* unswap as u32 */
2667                         mem_bswap_32(&header->adds_features,
2668                                     BITS_TO_U32(HEADER_FEAT_BITS));
2669                 }
2670
2671                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2672                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2673                         set_bit(HEADER_BUILD_ID, header->adds_features);
2674                 }
2675         }
2676
2677         memcpy(&ph->adds_features, &header->adds_features,
2678                sizeof(ph->adds_features));
2679
2680         ph->data_offset  = header->data.offset;
2681         ph->data_size    = header->data.size;
2682         ph->feat_offset  = header->data.offset + header->data.size;
2683         return 0;
2684 }
2685
2686 static int perf_file_section__process(struct perf_file_section *section,
2687                                       struct perf_header *ph,
2688                                       int feat, int fd, void *data)
2689 {
2690         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2691                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2692                           "%d, continuing...\n", section->offset, feat);
2693                 return 0;
2694         }
2695
2696         if (feat >= HEADER_LAST_FEATURE) {
2697                 pr_debug("unknown feature %d, continuing...\n", feat);
2698                 return 0;
2699         }
2700
2701         if (!feat_ops[feat].process)
2702                 return 0;
2703
2704         return feat_ops[feat].process(section, ph, fd, data);
2705 }
2706
2707 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2708                                        struct perf_header *ph, int fd,
2709                                        bool repipe)
2710 {
2711         ssize_t ret;
2712
2713         ret = readn(fd, header, sizeof(*header));
2714         if (ret <= 0)
2715                 return -1;
2716
2717         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2718                 pr_debug("endian/magic failed\n");
2719                 return -1;
2720         }
2721
2722         if (ph->needs_swap)
2723                 header->size = bswap_64(header->size);
2724
2725         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2726                 return -1;
2727
2728         return 0;
2729 }
2730
2731 static int perf_header__read_pipe(struct perf_session *session)
2732 {
2733         struct perf_header *header = &session->header;
2734         struct perf_pipe_file_header f_header;
2735
2736         if (perf_file_header__read_pipe(&f_header, header,
2737                                         perf_data_file__fd(session->file),
2738                                         session->repipe) < 0) {
2739                 pr_debug("incompatible file format\n");
2740                 return -EINVAL;
2741         }
2742
2743         return 0;
2744 }
2745
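/*
 * Read one on-file attribute: the first PERF_ATTR_SIZE_VER0 bytes are
 * always present, followed by whatever the file's attr->size adds (up to
 * the size this tool knows about), and finally the perf_file_section
 * locating that event's ids.
 */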
2746 static int read_attr(int fd, struct perf_header *ph,
2747                      struct perf_file_attr *f_attr)
2748 {
2749         struct perf_event_attr *attr = &f_attr->attr;
2750         size_t sz, left;
2751         size_t our_sz = sizeof(f_attr->attr);
2752         ssize_t ret;
2753
2754         memset(f_attr, 0, sizeof(*f_attr));
2755
2756         /* read minimal guaranteed structure */
2757         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2758         if (ret <= 0) {
2759                 pr_debug("cannot read %d bytes of header attr\n",
2760                          PERF_ATTR_SIZE_VER0);
2761                 return -1;
2762         }
2763
2764         /* on file perf_event_attr size */
2765         sz = attr->size;
2766
2767         if (ph->needs_swap)
2768                 sz = bswap_32(sz);
2769
2770         if (sz == 0) {
2771                 /* assume ABI0 */
2772                 sz =  PERF_ATTR_SIZE_VER0;
2773         } else if (sz > our_sz) {
2774                 pr_debug("file uses a more recent and unsupported ABI"
2775                          " (%zu bytes extra)\n", sz - our_sz);
2776                 return -1;
2777         }
2778         /* what we have not yet read and that we know about */
2779         left = sz - PERF_ATTR_SIZE_VER0;
2780         if (left) {
2781                 void *ptr = attr;
2782                 ptr += PERF_ATTR_SIZE_VER0;
2783
2784                 ret = readn(fd, ptr, left);
2785         }
2786         /* read perf_file_section, ids are read in caller */
2787         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2788
2789         return ret <= 0 ? -1 : 0;
2790 }
2791
2792 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2793                                                 struct pevent *pevent)
2794 {
2795         struct event_format *event;
2796         char bf[128];
2797
2798         /* already prepared */
2799         if (evsel->tp_format)
2800                 return 0;
2801
2802         if (pevent == NULL) {
2803                 pr_debug("broken or missing trace data\n");
2804                 return -1;
2805         }
2806
2807         event = pevent_find_event(pevent, evsel->attr.config);
2808         if (event == NULL)
2809                 return -1;
2810
2811         if (!evsel->name) {
2812                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2813                 evsel->name = strdup(bf);
2814                 if (evsel->name == NULL)
2815                         return -1;
2816         }
2817
2818         evsel->tp_format = event;
2819         return 0;
2820 }
2821
2822 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2823                                                   struct pevent *pevent)
2824 {
2825         struct perf_evsel *pos;
2826
2827         evlist__for_each(evlist, pos) {
2828                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2829                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2830                         return -1;
2831         }
2832
2833         return 0;
2834 }
2835
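/*
 * Read a perf.data header: check magic/endianness, load every attribute
 * and its sample ids into a new evlist, then walk the optional feature
 * sections and prepare tracepoint events from the parsed tracing data.
 */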
2836 int perf_session__read_header(struct perf_session *session)
2837 {
2838         struct perf_data_file *file = session->file;
2839         struct perf_header *header = &session->header;
2840         struct perf_file_header f_header;
2841         struct perf_file_attr   f_attr;
2842         u64                     f_id;
2843         int nr_attrs, nr_ids, i, j;
2844         int fd = perf_data_file__fd(file);
2845
2846         session->evlist = perf_evlist__new();
2847         if (session->evlist == NULL)
2848                 return -ENOMEM;
2849
2850         session->evlist->env = &header->env;
2851         session->machines.host.env = &header->env;
2852         if (perf_data_file__is_pipe(file))
2853                 return perf_header__read_pipe(session);
2854
2855         if (perf_file_header__read(&f_header, header, fd) < 0)
2856                 return -EINVAL;
2857
2858         /*
2859          * Sanity check that perf.data was written cleanly; data size is
2860          * initialized to 0 and updated only if the on_exit function is run.
2861          * If data size is still 0 then the file contains only partial
2862          * information.  Just warn the user and process as much of it as we can.
2863          */
2864         if (f_header.data.size == 0) {
2865                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2866                            "Was the 'perf record' command properly terminated?\n",
2867                            file->path);
2868         }
2869
2870         nr_attrs = f_header.attrs.size / f_header.attr_size;
2871         lseek(fd, f_header.attrs.offset, SEEK_SET);
2872
2873         for (i = 0; i < nr_attrs; i++) {
2874                 struct perf_evsel *evsel;
2875                 off_t tmp;
2876
2877                 if (read_attr(fd, header, &f_attr) < 0)
2878                         goto out_errno;
2879
2880                 if (header->needs_swap) {
2881                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2882                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2883                         perf_event__attr_swap(&f_attr.attr);
2884                 }
2885
2886                 tmp = lseek(fd, 0, SEEK_CUR);
2887                 evsel = perf_evsel__new(&f_attr.attr);
2888
2889                 if (evsel == NULL)
2890                         goto out_delete_evlist;
2891
2892                 evsel->needs_swap = header->needs_swap;
2893                 /*
2894                  * Do it before so that if perf_evsel__alloc_id fails, this
2895                  * entry gets purged too at perf_evlist__delete().
2896                  */
2897                 perf_evlist__add(session->evlist, evsel);
2898
2899                 nr_ids = f_attr.ids.size / sizeof(u64);
2900                 /*
2901                  * We don't have the cpu and thread maps on the header, so
2902                  * We don't have the cpu and thread maps in the header, so
2903                  * hattr->ids threads.
2904                  */
2905                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2906                         goto out_delete_evlist;
2907
2908                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2909
2910                 for (j = 0; j < nr_ids; j++) {
2911                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2912                                 goto out_errno;
2913
2914                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2915                 }
2916
2917                 lseek(fd, tmp, SEEK_SET);
2918         }
2919
2920         symbol_conf.nr_events = nr_attrs;
2921
2922         perf_header__process_sections(header, fd, &session->tevent,
2923                                       perf_file_section__process);
2924
2925         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2926                                                    session->tevent.pevent))
2927                 goto out_delete_evlist;
2928
2929         return 0;
2930 out_errno:
2931         return -errno;
2932
2933 out_delete_evlist:
2934         perf_evlist__delete(session->evlist);
2935         session->evlist = NULL;
2936         return -ENOMEM;
2937 }
2938
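/*
 * Pipe-mode counterpart of the attr section: pack a perf_event_attr plus
 * its sample ids into a PERF_RECORD_HEADER_ATTR event and feed it to the
 * supplied process callback.
 */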
2939 int perf_event__synthesize_attr(struct perf_tool *tool,
2940                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2941                                 perf_event__handler_t process)
2942 {
2943         union perf_event *ev;
2944         size_t size;
2945         int err;
2946
2947         size = sizeof(struct perf_event_attr);
2948         size = PERF_ALIGN(size, sizeof(u64));
2949         size += sizeof(struct perf_event_header);
2950         size += ids * sizeof(u64);
2951
2952         ev = malloc(size);
2953
2954         if (ev == NULL)
2955                 return -ENOMEM;
2956
2957         ev->attr.attr = *attr;
2958         memcpy(ev->attr.id, id, ids * sizeof(u64));
2959
2960         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2961         ev->attr.header.size = (u16)size;
2962
2963         if (ev->attr.header.size == size)
2964                 err = process(tool, ev, NULL, NULL);
2965         else
2966                 err = -E2BIG;
2967
2968         free(ev);
2969
2970         return err;
2971 }
2972
2973 static struct event_update_event *
2974 event_update_event__new(size_t size, u64 type, u64 id)
2975 {
2976         struct event_update_event *ev;
2977
2978         size += sizeof(*ev);
2979         size  = PERF_ALIGN(size, sizeof(u64));
2980
2981         ev = zalloc(size);
2982         if (ev) {
2983                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2984                 ev->header.size = (u16)size;
2985                 ev->type = type;
2986                 ev->id = id;
2987         }
2988         return ev;
2989 }
2990
2991 int
2992 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2993                                          struct perf_evsel *evsel,
2994                                          perf_event__handler_t process)
2995 {
2996         struct event_update_event *ev;
2997         size_t size = strlen(evsel->unit);
2998         int err;
2999
3000         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3001         if (ev == NULL)
3002                 return -ENOMEM;
3003
3004         strncpy(ev->data, evsel->unit, size);
3005         err = process(tool, (union perf_event *)ev, NULL, NULL);
3006         free(ev);
3007         return err;
3008 }
3009
3010 int
3011 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3012                                           struct perf_evsel *evsel,
3013                                           perf_event__handler_t process)
3014 {
3015         struct event_update_event *ev;
3016         struct event_update_event_scale *ev_data;
3017         int err;
3018
3019         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3020         if (ev == NULL)
3021                 return -ENOMEM;
3022
3023         ev_data = (struct event_update_event_scale *) ev->data;
3024         ev_data->scale = evsel->scale;
3025         err = process(tool, (union perf_event*) ev, NULL, NULL);
3026         free(ev);
3027         return err;
3028 }
3029
3030 int
3031 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3032                                          struct perf_evsel *evsel,
3033                                          perf_event__handler_t process)
3034 {
3035         struct event_update_event *ev;
3036         size_t len = strlen(evsel->name);
3037         int err;
3038
3039         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3040         if (ev == NULL)
3041                 return -ENOMEM;
3042
3043         strncpy(ev->data, evsel->name, len);
3044         err = process(tool, (union perf_event*) ev, NULL, NULL);
3045         free(ev);
3046         return err;
3047 }
3048
3049 int
3050 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3051                                         struct perf_evsel *evsel,
3052                                         perf_event__handler_t process)
3053 {
3054         size_t size = sizeof(struct event_update_event);
3055         struct event_update_event *ev;
3056         int max, err;
3057         u16 type;
3058
3059         if (!evsel->own_cpus)
3060                 return 0;
3061
3062         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3063         if (!ev)
3064                 return -ENOMEM;
3065
3066         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3067         ev->header.size = (u16)size;
3068         ev->type = PERF_EVENT_UPDATE__CPUS;
3069         ev->id   = evsel->id[0];
3070
3071         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3072                                  evsel->own_cpus,
3073                                  type, max);
3074
3075         err = process(tool, (union perf_event*) ev, NULL, NULL);
3076         free(ev);
3077         return err;
3078 }
3079
3080 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3081 {
3082         struct event_update_event *ev = &event->event_update;
3083         struct event_update_event_scale *ev_scale;
3084         struct event_update_event_cpus *ev_cpus;
3085         struct cpu_map *map;
3086         size_t ret;
3087
3088         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3089
3090         switch (ev->type) {
3091         case PERF_EVENT_UPDATE__SCALE:
3092                 ev_scale = (struct event_update_event_scale *) ev->data;
3093                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3094                 break;
3095         case PERF_EVENT_UPDATE__UNIT:
3096                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3097                 break;
3098         case PERF_EVENT_UPDATE__NAME:
3099                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3100                 break;
3101         case PERF_EVENT_UPDATE__CPUS:
3102                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3103                 ret += fprintf(fp, "... ");
3104
3105                 map = cpu_map__new_data(&ev_cpus->cpus);
3106                 if (map)
3107                         ret += cpu_map__fprintf(map, fp);
3108                 else
3109                         ret += fprintf(fp, "failed to get cpus\n");
3110                 break;
3111         default:
3112                 ret += fprintf(fp, "... unknown type\n");
3113                 break;
3114         }
3115
3116         return ret;
3117 }
3118
3119 int perf_event__synthesize_attrs(struct perf_tool *tool,
3120                                    struct perf_session *session,
3121                                    perf_event__handler_t process)
3122 {
3123         struct perf_evsel *evsel;
3124         int err = 0;
3125
3126         evlist__for_each(session->evlist, evsel) {
3127                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3128                                                   evsel->id, process);
3129                 if (err) {
3130                         pr_debug("failed to create perf header attribute\n");
3131                         return err;
3132                 }
3133         }
3134
3135         return err;
3136 }
3137
3138 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3139                              union perf_event *event,
3140                              struct perf_evlist **pevlist)
3141 {
3142         u32 i, ids, n_ids;
3143         struct perf_evsel *evsel;
3144         struct perf_evlist *evlist = *pevlist;
3145
3146         if (evlist == NULL) {
3147                 *pevlist = evlist = perf_evlist__new();
3148                 if (evlist == NULL)
3149                         return -ENOMEM;
3150         }
3151
3152         evsel = perf_evsel__new(&event->attr.attr);
3153         if (evsel == NULL)
3154                 return -ENOMEM;
3155
3156         perf_evlist__add(evlist, evsel);
3157
3158         ids = event->header.size;
3159         ids -= (void *)&event->attr.id - (void *)event;
3160         n_ids = ids / sizeof(u64);
3161         /*
3162          * We don't have the cpu and thread maps in the header, so
3163          * for allocating the perf_sample_id table we fake 1 cpu and
3164          * hattr->ids threads.
3165          */
3166         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3167                 return -ENOMEM;
3168
3169         for (i = 0; i < n_ids; i++) {
3170                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3171         }
3172
3173         symbol_conf.nr_events = evlist->nr_entries;
3174
3175         return 0;
3176 }
3177
3178 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3179                                      union perf_event *event,
3180                                      struct perf_evlist **pevlist)
3181 {
3182         struct event_update_event *ev = &event->event_update;
3183         struct event_update_event_scale *ev_scale;
3184         struct event_update_event_cpus *ev_cpus;
3185         struct perf_evlist *evlist;
3186         struct perf_evsel *evsel;
3187         struct cpu_map *map;
3188
3189         if (!pevlist || *pevlist == NULL)
3190                 return -EINVAL;
3191
3192         evlist = *pevlist;
3193
3194         evsel = perf_evlist__id2evsel(evlist, ev->id);
3195         if (evsel == NULL)
3196                 return -EINVAL;
3197
3198         switch (ev->type) {
3199         case PERF_EVENT_UPDATE__UNIT:
3200                 evsel->unit = strdup(ev->data);
3201                 break;
3202         case PERF_EVENT_UPDATE__NAME:
3203                 evsel->name = strdup(ev->data);
3204                 break;
3205         case PERF_EVENT_UPDATE__SCALE:
3206                 ev_scale = (struct event_update_event_scale *) ev->data;
3207                 evsel->scale = ev_scale->scale;
                     break;
3208         case PERF_EVENT_UPDATE__CPUS:
3209                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3210
3211                 map = cpu_map__new_data(&ev_cpus->cpus);
3212                 if (map)
3213                         evsel->own_cpus = map;
3214                 else
3215                         pr_err("failed to get event_update cpus\n");
3216         default:
3217                 break;
3218         }
3219
3220         return 0;
3221 }
3222
3223 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3224                                         struct perf_evlist *evlist,
3225                                         perf_event__handler_t process)
3226 {
3227         union perf_event ev;
3228         struct tracing_data *tdata;
3229         ssize_t size = 0, aligned_size = 0, padding;
3230         int err __maybe_unused = 0;
3231
3232         /*
3233          * We are going to store the size of the data followed
3234          * by the data contents. Since fd is a pipe,
3235          * we cannot seek back to store the size of the data once
3236          * we know it. Instead we:
3237          *
3238          * - write the tracing data to the temp file
3239          * - get/write the data size to pipe
3240          * - write the tracing data from the temp file
3241          *   to the pipe
3242          */
3243         tdata = tracing_data_get(&evlist->entries, fd, true);
3244         if (!tdata)
3245                 return -1;
3246
3247         memset(&ev, 0, sizeof(ev));
3248
3249         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3250         size = tdata->size;
3251         aligned_size = PERF_ALIGN(size, sizeof(u64));
3252         padding = aligned_size - size;
3253         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3254         ev.tracing_data.size = aligned_size;
3255
3256         process(tool, &ev, NULL, NULL);
3257
3258         /*
3259          * The put function will copy all the tracing data
3260          * stored in temp file to the pipe.
3261          */
3262         tracing_data_put(tdata);
3263
3264         write_padded(fd, NULL, 0, padding);
3265
3266         return aligned_size;
3267 }
3268
3269 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3270                                      union perf_event *event,
3271                                      struct perf_session *session)
3272 {
3273         ssize_t size_read, padding, size = event->tracing_data.size;
3274         int fd = perf_data_file__fd(session->file);
3275         off_t offset = lseek(fd, 0, SEEK_CUR);
3276         char buf[BUFSIZ];
3277
3278         /* setup for reading amidst mmap */
3279         lseek(fd, offset + sizeof(struct tracing_data_event),
3280               SEEK_SET);
3281
3282         size_read = trace_report(fd, &session->tevent,
3283                                  session->repipe);
3284         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3285
3286         if (readn(fd, buf, padding) < 0) {
3287                 pr_err("%s: reading input file", __func__);
3288                 return -1;
3289         }
3290         if (session->repipe) {
3291                 int retw = write(STDOUT_FILENO, buf, padding);
3292                 if (retw <= 0 || retw != padding) {
3293                         pr_err("%s: repiping tracing data padding", __func__);
3294                         return -1;
3295                 }
3296         }
3297
3298         if (size_read + padding != size) {
3299                 pr_err("%s: tracing data size mismatch", __func__);
3300                 return -1;
3301         }
3302
3303         perf_evlist__prepare_tracepoint_events(session->evlist,
3304                                                session->tevent.pevent);
3305
3306         return size_read + padding;
3307 }
3308
3309 int perf_event__synthesize_build_id(struct perf_tool *tool,
3310                                     struct dso *pos, u16 misc,
3311                                     perf_event__handler_t process,
3312                                     struct machine *machine)
3313 {
3314         union perf_event ev;
3315         size_t len;
3316         int err = 0;
3317
3318         if (!pos->hit)
3319                 return err;
3320
3321         memset(&ev, 0, sizeof(ev));
3322
3323         len = pos->long_name_len + 1;
3324         len = PERF_ALIGN(len, NAME_ALIGN);
3325         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3326         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3327         ev.build_id.header.misc = misc;
3328         ev.build_id.pid = machine->pid;
3329         ev.build_id.header.size = sizeof(ev.build_id) + len;
3330         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3331
3332         err = process(tool, &ev, NULL, machine);
3333
3334         return err;
3335 }
3336
3337 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3338                                  union perf_event *event,
3339                                  struct perf_session *session)
3340 {
3341         __event_process_build_id(&event->build_id,
3342                                  event->build_id.filename,
3343                                  session);
3344         return 0;
3345 }