]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/util/header.c
Merge tag 'platform-drivers-x86-v4.12-2' of git://git.infradead.org/linux-platform...
[karo-tx-linux.git] / tools / perf / util / header.c
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/list.h>
12 #include <linux/kernel.h>
13 #include <linux/bitops.h>
14 #include <sys/stat.h>
15 #include <sys/types.h>
16 #include <sys/utsname.h>
17 #include <unistd.h>
18
19 #include "evlist.h"
20 #include "evsel.h"
21 #include "header.h"
22 #include "memswap.h"
23 #include "../perf.h"
24 #include "trace-event.h"
25 #include "session.h"
26 #include "symbol.h"
27 #include "debug.h"
28 #include "cpumap.h"
29 #include "pmu.h"
30 #include "vdso.h"
31 #include "strbuf.h"
32 #include "build-id.h"
33 #include "data.h"
34 #include <api/fs/fs.h>
35 #include "asm/bug.h"
36
37 #include "sane_ctype.h"
38
39 /*
40  * magic2 = "PERFILE2"
41  * must be a numerical value to let the endianness
42  * determine the memory layout. That way we are able
43  * to detect endianness when reading the perf.data file
44  * back.
45  *
46  * we check for legacy (PERFFILE) format.
47  */
48 static const char *__perf_magic1 = "PERFFILE";
49 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
50 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
51
52 #define PERF_MAGIC      __perf_magic2
53
54 const char perf_version_string[] = PERF_VERSION;
55
56 struct perf_file_attr {
57         struct perf_event_attr  attr;
58         struct perf_file_section        ids;
59 };
60
61 void perf_header__set_feat(struct perf_header *header, int feat)
62 {
63         set_bit(feat, header->adds_features);
64 }
65
66 void perf_header__clear_feat(struct perf_header *header, int feat)
67 {
68         clear_bit(feat, header->adds_features);
69 }
70
71 bool perf_header__has_feat(const struct perf_header *header, int feat)
72 {
73         return test_bit(feat, header->adds_features);
74 }
75
76 static int do_write(int fd, const void *buf, size_t size)
77 {
78         while (size) {
79                 int ret = write(fd, buf, size);
80
81                 if (ret < 0)
82                         return -errno;
83
84                 size -= ret;
85                 buf += ret;
86         }
87
88         return 0;
89 }
90
91 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
92 {
93         static const char zero_buf[NAME_ALIGN];
94         int err = do_write(fd, bf, count);
95
96         if (!err)
97                 err = do_write(fd, zero_buf, count_aligned - count);
98
99         return err;
100 }
101
102 #define string_size(str)                                                \
103         (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
104
105 static int do_write_string(int fd, const char *str)
106 {
107         u32 len, olen;
108         int ret;
109
110         olen = strlen(str) + 1;
111         len = PERF_ALIGN(olen, NAME_ALIGN);
112
113         /* write len, incl. \0 */
114         ret = do_write(fd, &len, sizeof(len));
115         if (ret < 0)
116                 return ret;
117
118         return write_padded(fd, str, olen, len);
119 }
120
121 static char *do_read_string(int fd, struct perf_header *ph)
122 {
123         ssize_t sz, ret;
124         u32 len;
125         char *buf;
126
127         sz = readn(fd, &len, sizeof(len));
128         if (sz < (ssize_t)sizeof(len))
129                 return NULL;
130
131         if (ph->needs_swap)
132                 len = bswap_32(len);
133
134         buf = malloc(len);
135         if (!buf)
136                 return NULL;
137
138         ret = readn(fd, buf, len);
139         if (ret == (ssize_t)len) {
140                 /*
141                  * strings are padded by zeroes
142                  * thus the actual strlen of buf
143                  * may be less than len
144                  */
145                 return buf;
146         }
147
148         free(buf);
149         return NULL;
150 }
151
152 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
153                             struct perf_evlist *evlist)
154 {
155         return read_tracing_data(fd, &evlist->entries);
156 }
157
158
159 static int write_build_id(int fd, struct perf_header *h,
160                           struct perf_evlist *evlist __maybe_unused)
161 {
162         struct perf_session *session;
163         int err;
164
165         session = container_of(h, struct perf_session, header);
166
167         if (!perf_session__read_build_ids(session, true))
168                 return -1;
169
170         err = perf_session__write_buildid_table(session, fd);
171         if (err < 0) {
172                 pr_debug("failed to write buildid table\n");
173                 return err;
174         }
175         perf_session__cache_build_ids(session);
176
177         return 0;
178 }
179
180 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
181                           struct perf_evlist *evlist __maybe_unused)
182 {
183         struct utsname uts;
184         int ret;
185
186         ret = uname(&uts);
187         if (ret < 0)
188                 return -1;
189
190         return do_write_string(fd, uts.nodename);
191 }
192
193 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
194                            struct perf_evlist *evlist __maybe_unused)
195 {
196         struct utsname uts;
197         int ret;
198
199         ret = uname(&uts);
200         if (ret < 0)
201                 return -1;
202
203         return do_write_string(fd, uts.release);
204 }
205
206 static int write_arch(int fd, struct perf_header *h __maybe_unused,
207                       struct perf_evlist *evlist __maybe_unused)
208 {
209         struct utsname uts;
210         int ret;
211
212         ret = uname(&uts);
213         if (ret < 0)
214                 return -1;
215
216         return do_write_string(fd, uts.machine);
217 }
218
219 static int write_version(int fd, struct perf_header *h __maybe_unused,
220                          struct perf_evlist *evlist __maybe_unused)
221 {
222         return do_write_string(fd, perf_version_string);
223 }
224
225 static int __write_cpudesc(int fd, const char *cpuinfo_proc)
226 {
227         FILE *file;
228         char *buf = NULL;
229         char *s, *p;
230         const char *search = cpuinfo_proc;
231         size_t len = 0;
232         int ret = -1;
233
234         if (!search)
235                 return -1;
236
237         file = fopen("/proc/cpuinfo", "r");
238         if (!file)
239                 return -1;
240
241         while (getline(&buf, &len, file) > 0) {
242                 ret = strncmp(buf, search, strlen(search));
243                 if (!ret)
244                         break;
245         }
246
247         if (ret) {
248                 ret = -1;
249                 goto done;
250         }
251
252         s = buf;
253
254         p = strchr(buf, ':');
255         if (p && *(p+1) == ' ' && *(p+2))
256                 s = p + 2;
257         p = strchr(s, '\n');
258         if (p)
259                 *p = '\0';
260
261         /* squash extra space characters (branding string) */
262         p = s;
263         while (*p) {
264                 if (isspace(*p)) {
265                         char *r = p + 1;
266                         char *q = r;
267                         *p = ' ';
268                         while (*q && isspace(*q))
269                                 q++;
270                         if (q != (p+1))
271                                 while ((*r++ = *q++));
272                 }
273                 p++;
274         }
275         ret = do_write_string(fd, s);
276 done:
277         free(buf);
278         fclose(file);
279         return ret;
280 }
281
282 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
283                        struct perf_evlist *evlist __maybe_unused)
284 {
285 #ifndef CPUINFO_PROC
286 #define CPUINFO_PROC {"model name", }
287 #endif
288         const char *cpuinfo_procs[] = CPUINFO_PROC;
289         unsigned int i;
290
291         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
292                 int ret;
293                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
294                 if (ret >= 0)
295                         return ret;
296         }
297         return -1;
298 }
299
300
301 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
302                         struct perf_evlist *evlist __maybe_unused)
303 {
304         long nr;
305         u32 nrc, nra;
306         int ret;
307
308         nrc = cpu__max_present_cpu();
309
310         nr = sysconf(_SC_NPROCESSORS_ONLN);
311         if (nr < 0)
312                 return -1;
313
314         nra = (u32)(nr & UINT_MAX);
315
316         ret = do_write(fd, &nrc, sizeof(nrc));
317         if (ret < 0)
318                 return ret;
319
320         return do_write(fd, &nra, sizeof(nra));
321 }
322
323 static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
324                             struct perf_evlist *evlist)
325 {
326         struct perf_evsel *evsel;
327         u32 nre, nri, sz;
328         int ret;
329
330         nre = evlist->nr_entries;
331
332         /*
333          * write number of events
334          */
335         ret = do_write(fd, &nre, sizeof(nre));
336         if (ret < 0)
337                 return ret;
338
339         /*
340          * size of perf_event_attr struct
341          */
342         sz = (u32)sizeof(evsel->attr);
343         ret = do_write(fd, &sz, sizeof(sz));
344         if (ret < 0)
345                 return ret;
346
347         evlist__for_each_entry(evlist, evsel) {
348                 ret = do_write(fd, &evsel->attr, sz);
349                 if (ret < 0)
350                         return ret;
351                 /*
352                  * write number of unique id per event
353                  * there is one id per instance of an event
354                  *
355                  * copy into an nri to be independent of the
356                  * type of ids,
357                  */
358                 nri = evsel->ids;
359                 ret = do_write(fd, &nri, sizeof(nri));
360                 if (ret < 0)
361                         return ret;
362
363                 /*
364                  * write event string as passed on cmdline
365                  */
366                 ret = do_write_string(fd, perf_evsel__name(evsel));
367                 if (ret < 0)
368                         return ret;
369                 /*
370                  * write unique ids for this event
371                  */
372                 ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
373                 if (ret < 0)
374                         return ret;
375         }
376         return 0;
377 }
378
379 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
380                          struct perf_evlist *evlist __maybe_unused)
381 {
382         char buf[MAXPATHLEN];
383         u32 n;
384         int i, ret;
385
386         /* actual path to perf binary */
387         ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
388         if (ret <= 0)
389                 return -1;
390
391         /* readlink() does not add null termination */
392         buf[ret] = '\0';
393
394         /* account for binary path */
395         n = perf_env.nr_cmdline + 1;
396
397         ret = do_write(fd, &n, sizeof(n));
398         if (ret < 0)
399                 return ret;
400
401         ret = do_write_string(fd, buf);
402         if (ret < 0)
403                 return ret;
404
405         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
406                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
407                 if (ret < 0)
408                         return ret;
409         }
410         return 0;
411 }
412
413 #define CORE_SIB_FMT \
414         "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
415 #define THRD_SIB_FMT \
416         "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
417
418 struct cpu_topo {
419         u32 cpu_nr;
420         u32 core_sib;
421         u32 thread_sib;
422         char **core_siblings;
423         char **thread_siblings;
424 };
425
426 static int build_cpu_topo(struct cpu_topo *tp, int cpu)
427 {
428         FILE *fp;
429         char filename[MAXPATHLEN];
430         char *buf = NULL, *p;
431         size_t len = 0;
432         ssize_t sret;
433         u32 i = 0;
434         int ret = -1;
435
436         sprintf(filename, CORE_SIB_FMT, cpu);
437         fp = fopen(filename, "r");
438         if (!fp)
439                 goto try_threads;
440
441         sret = getline(&buf, &len, fp);
442         fclose(fp);
443         if (sret <= 0)
444                 goto try_threads;
445
446         p = strchr(buf, '\n');
447         if (p)
448                 *p = '\0';
449
450         for (i = 0; i < tp->core_sib; i++) {
451                 if (!strcmp(buf, tp->core_siblings[i]))
452                         break;
453         }
454         if (i == tp->core_sib) {
455                 tp->core_siblings[i] = buf;
456                 tp->core_sib++;
457                 buf = NULL;
458                 len = 0;
459         }
460         ret = 0;
461
462 try_threads:
463         sprintf(filename, THRD_SIB_FMT, cpu);
464         fp = fopen(filename, "r");
465         if (!fp)
466                 goto done;
467
468         if (getline(&buf, &len, fp) <= 0)
469                 goto done;
470
471         p = strchr(buf, '\n');
472         if (p)
473                 *p = '\0';
474
475         for (i = 0; i < tp->thread_sib; i++) {
476                 if (!strcmp(buf, tp->thread_siblings[i]))
477                         break;
478         }
479         if (i == tp->thread_sib) {
480                 tp->thread_siblings[i] = buf;
481                 tp->thread_sib++;
482                 buf = NULL;
483         }
484         ret = 0;
485 done:
486         if(fp)
487                 fclose(fp);
488         free(buf);
489         return ret;
490 }
491
492 static void free_cpu_topo(struct cpu_topo *tp)
493 {
494         u32 i;
495
496         if (!tp)
497                 return;
498
499         for (i = 0 ; i < tp->core_sib; i++)
500                 zfree(&tp->core_siblings[i]);
501
502         for (i = 0 ; i < tp->thread_sib; i++)
503                 zfree(&tp->thread_siblings[i]);
504
505         free(tp);
506 }
507
508 static struct cpu_topo *build_cpu_topology(void)
509 {
510         struct cpu_topo *tp = NULL;
511         void *addr;
512         u32 nr, i;
513         size_t sz;
514         long ncpus;
515         int ret = -1;
516         struct cpu_map *map;
517
518         ncpus = cpu__max_present_cpu();
519
520         /* build online CPU map */
521         map = cpu_map__new(NULL);
522         if (map == NULL) {
523                 pr_debug("failed to get system cpumap\n");
524                 return NULL;
525         }
526
527         nr = (u32)(ncpus & UINT_MAX);
528
529         sz = nr * sizeof(char *);
530         addr = calloc(1, sizeof(*tp) + 2 * sz);
531         if (!addr)
532                 goto out_free;
533
534         tp = addr;
535         tp->cpu_nr = nr;
536         addr += sizeof(*tp);
537         tp->core_siblings = addr;
538         addr += sz;
539         tp->thread_siblings = addr;
540
541         for (i = 0; i < nr; i++) {
542                 if (!cpu_map__has(map, i))
543                         continue;
544
545                 ret = build_cpu_topo(tp, i);
546                 if (ret < 0)
547                         break;
548         }
549
550 out_free:
551         cpu_map__put(map);
552         if (ret) {
553                 free_cpu_topo(tp);
554                 tp = NULL;
555         }
556         return tp;
557 }
558
559 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
560                           struct perf_evlist *evlist __maybe_unused)
561 {
562         struct cpu_topo *tp;
563         u32 i;
564         int ret, j;
565
566         tp = build_cpu_topology();
567         if (!tp)
568                 return -1;
569
570         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
571         if (ret < 0)
572                 goto done;
573
574         for (i = 0; i < tp->core_sib; i++) {
575                 ret = do_write_string(fd, tp->core_siblings[i]);
576                 if (ret < 0)
577                         goto done;
578         }
579         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
580         if (ret < 0)
581                 goto done;
582
583         for (i = 0; i < tp->thread_sib; i++) {
584                 ret = do_write_string(fd, tp->thread_siblings[i]);
585                 if (ret < 0)
586                         break;
587         }
588
589         ret = perf_env__read_cpu_topology_map(&perf_env);
590         if (ret < 0)
591                 goto done;
592
593         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
594                 ret = do_write(fd, &perf_env.cpu[j].core_id,
595                                sizeof(perf_env.cpu[j].core_id));
596                 if (ret < 0)
597                         return ret;
598                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
599                                sizeof(perf_env.cpu[j].socket_id));
600                 if (ret < 0)
601                         return ret;
602         }
603 done:
604         free_cpu_topo(tp);
605         return ret;
606 }
607
608
609
610 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
611                           struct perf_evlist *evlist __maybe_unused)
612 {
613         char *buf = NULL;
614         FILE *fp;
615         size_t len = 0;
616         int ret = -1, n;
617         uint64_t mem;
618
619         fp = fopen("/proc/meminfo", "r");
620         if (!fp)
621                 return -1;
622
623         while (getline(&buf, &len, fp) > 0) {
624                 ret = strncmp(buf, "MemTotal:", 9);
625                 if (!ret)
626                         break;
627         }
628         if (!ret) {
629                 n = sscanf(buf, "%*s %"PRIu64, &mem);
630                 if (n == 1)
631                         ret = do_write(fd, &mem, sizeof(mem));
632         } else
633                 ret = -1;
634         free(buf);
635         fclose(fp);
636         return ret;
637 }
638
639 static int write_topo_node(int fd, int node)
640 {
641         char str[MAXPATHLEN];
642         char field[32];
643         char *buf = NULL, *p;
644         size_t len = 0;
645         FILE *fp;
646         u64 mem_total, mem_free, mem;
647         int ret = -1;
648
649         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
650         fp = fopen(str, "r");
651         if (!fp)
652                 return -1;
653
654         while (getline(&buf, &len, fp) > 0) {
655                 /* skip over invalid lines */
656                 if (!strchr(buf, ':'))
657                         continue;
658                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
659                         goto done;
660                 if (!strcmp(field, "MemTotal:"))
661                         mem_total = mem;
662                 if (!strcmp(field, "MemFree:"))
663                         mem_free = mem;
664         }
665
666         fclose(fp);
667         fp = NULL;
668
669         ret = do_write(fd, &mem_total, sizeof(u64));
670         if (ret)
671                 goto done;
672
673         ret = do_write(fd, &mem_free, sizeof(u64));
674         if (ret)
675                 goto done;
676
677         ret = -1;
678         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
679
680         fp = fopen(str, "r");
681         if (!fp)
682                 goto done;
683
684         if (getline(&buf, &len, fp) <= 0)
685                 goto done;
686
687         p = strchr(buf, '\n');
688         if (p)
689                 *p = '\0';
690
691         ret = do_write_string(fd, buf);
692 done:
693         free(buf);
694         if (fp)
695                 fclose(fp);
696         return ret;
697 }
698
699 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
700                           struct perf_evlist *evlist __maybe_unused)
701 {
702         char *buf = NULL;
703         size_t len = 0;
704         FILE *fp;
705         struct cpu_map *node_map = NULL;
706         char *c;
707         u32 nr, i, j;
708         int ret = -1;
709
710         fp = fopen("/sys/devices/system/node/online", "r");
711         if (!fp)
712                 return -1;
713
714         if (getline(&buf, &len, fp) <= 0)
715                 goto done;
716
717         c = strchr(buf, '\n');
718         if (c)
719                 *c = '\0';
720
721         node_map = cpu_map__new(buf);
722         if (!node_map)
723                 goto done;
724
725         nr = (u32)node_map->nr;
726
727         ret = do_write(fd, &nr, sizeof(nr));
728         if (ret < 0)
729                 goto done;
730
731         for (i = 0; i < nr; i++) {
732                 j = (u32)node_map->map[i];
733                 ret = do_write(fd, &j, sizeof(j));
734                 if (ret < 0)
735                         break;
736
737                 ret = write_topo_node(fd, i);
738                 if (ret < 0)
739                         break;
740         }
741 done:
742         free(buf);
743         fclose(fp);
744         cpu_map__put(node_map);
745         return ret;
746 }
747
748 /*
749  * File format:
750  *
751  * struct pmu_mappings {
752  *      u32     pmu_num;
753  *      struct pmu_map {
754  *              u32     type;
755  *              char    name[];
756  *      }[pmu_num];
757  * };
758  */
759
760 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
761                               struct perf_evlist *evlist __maybe_unused)
762 {
763         struct perf_pmu *pmu = NULL;
764         off_t offset = lseek(fd, 0, SEEK_CUR);
765         __u32 pmu_num = 0;
766         int ret;
767
768         /* write real pmu_num later */
769         ret = do_write(fd, &pmu_num, sizeof(pmu_num));
770         if (ret < 0)
771                 return ret;
772
773         while ((pmu = perf_pmu__scan(pmu))) {
774                 if (!pmu->name)
775                         continue;
776                 pmu_num++;
777
778                 ret = do_write(fd, &pmu->type, sizeof(pmu->type));
779                 if (ret < 0)
780                         return ret;
781
782                 ret = do_write_string(fd, pmu->name);
783                 if (ret < 0)
784                         return ret;
785         }
786
787         if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
788                 /* discard all */
789                 lseek(fd, offset, SEEK_SET);
790                 return -1;
791         }
792
793         return 0;
794 }
795
796 /*
797  * File format:
798  *
799  * struct group_descs {
800  *      u32     nr_groups;
801  *      struct group_desc {
802  *              char    name[];
803  *              u32     leader_idx;
804  *              u32     nr_members;
805  *      }[nr_groups];
806  * };
807  */
808 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
809                             struct perf_evlist *evlist)
810 {
811         u32 nr_groups = evlist->nr_groups;
812         struct perf_evsel *evsel;
813         int ret;
814
815         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
816         if (ret < 0)
817                 return ret;
818
819         evlist__for_each_entry(evlist, evsel) {
820                 if (perf_evsel__is_group_leader(evsel) &&
821                     evsel->nr_members > 1) {
822                         const char *name = evsel->group_name ?: "{anon_group}";
823                         u32 leader_idx = evsel->idx;
824                         u32 nr_members = evsel->nr_members;
825
826                         ret = do_write_string(fd, name);
827                         if (ret < 0)
828                                 return ret;
829
830                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
831                         if (ret < 0)
832                                 return ret;
833
834                         ret = do_write(fd, &nr_members, sizeof(nr_members));
835                         if (ret < 0)
836                                 return ret;
837                 }
838         }
839         return 0;
840 }
841
842 /*
843  * default get_cpuid(): nothing gets recorded
844  * actual implementation must be in arch/$(ARCH)/util/header.c
845  */
846 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
847 {
848         return -1;
849 }
850
851 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
852                        struct perf_evlist *evlist __maybe_unused)
853 {
854         char buffer[64];
855         int ret;
856
857         ret = get_cpuid(buffer, sizeof(buffer));
858         if (!ret)
859                 goto write_it;
860
861         return -1;
862 write_it:
863         return do_write_string(fd, buffer);
864 }
865
866 static int write_branch_stack(int fd __maybe_unused,
867                               struct perf_header *h __maybe_unused,
868                        struct perf_evlist *evlist __maybe_unused)
869 {
870         return 0;
871 }
872
873 static int write_auxtrace(int fd, struct perf_header *h,
874                           struct perf_evlist *evlist __maybe_unused)
875 {
876         struct perf_session *session;
877         int err;
878
879         session = container_of(h, struct perf_session, header);
880
881         err = auxtrace_index__write(fd, &session->auxtrace_index);
882         if (err < 0)
883                 pr_err("Failed to write auxtrace index\n");
884         return err;
885 }
886
887 static int cpu_cache_level__sort(const void *a, const void *b)
888 {
889         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
890         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
891
892         return cache_a->level - cache_b->level;
893 }
894
895 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
896 {
897         if (a->level != b->level)
898                 return false;
899
900         if (a->line_size != b->line_size)
901                 return false;
902
903         if (a->sets != b->sets)
904                 return false;
905
906         if (a->ways != b->ways)
907                 return false;
908
909         if (strcmp(a->type, b->type))
910                 return false;
911
912         if (strcmp(a->size, b->size))
913                 return false;
914
915         if (strcmp(a->map, b->map))
916                 return false;
917
918         return true;
919 }
920
921 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
922 {
923         char path[PATH_MAX], file[PATH_MAX];
924         struct stat st;
925         size_t len;
926
927         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
928         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
929
930         if (stat(file, &st))
931                 return 1;
932
933         scnprintf(file, PATH_MAX, "%s/level", path);
934         if (sysfs__read_int(file, (int *) &cache->level))
935                 return -1;
936
937         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
938         if (sysfs__read_int(file, (int *) &cache->line_size))
939                 return -1;
940
941         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
942         if (sysfs__read_int(file, (int *) &cache->sets))
943                 return -1;
944
945         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
946         if (sysfs__read_int(file, (int *) &cache->ways))
947                 return -1;
948
949         scnprintf(file, PATH_MAX, "%s/type", path);
950         if (sysfs__read_str(file, &cache->type, &len))
951                 return -1;
952
953         cache->type[len] = 0;
954         cache->type = rtrim(cache->type);
955
956         scnprintf(file, PATH_MAX, "%s/size", path);
957         if (sysfs__read_str(file, &cache->size, &len)) {
958                 free(cache->type);
959                 return -1;
960         }
961
962         cache->size[len] = 0;
963         cache->size = rtrim(cache->size);
964
965         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
966         if (sysfs__read_str(file, &cache->map, &len)) {
967                 free(cache->map);
968                 free(cache->type);
969                 return -1;
970         }
971
972         cache->map[len] = 0;
973         cache->map = rtrim(cache->map);
974         return 0;
975 }
976
977 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
978 {
979         fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
980 }
981
982 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
983 {
984         u32 i, cnt = 0;
985         long ncpus;
986         u32 nr, cpu;
987         u16 level;
988
989         ncpus = sysconf(_SC_NPROCESSORS_CONF);
990         if (ncpus < 0)
991                 return -1;
992
993         nr = (u32)(ncpus & UINT_MAX);
994
995         for (cpu = 0; cpu < nr; cpu++) {
996                 for (level = 0; level < 10; level++) {
997                         struct cpu_cache_level c;
998                         int err;
999
1000                         err = cpu_cache_level__read(&c, cpu, level);
1001                         if (err < 0)
1002                                 return err;
1003
1004                         if (err == 1)
1005                                 break;
1006
1007                         for (i = 0; i < cnt; i++) {
1008                                 if (cpu_cache_level__cmp(&c, &caches[i]))
1009                                         break;
1010                         }
1011
1012                         if (i == cnt)
1013                                 caches[cnt++] = c;
1014                         else
1015                                 cpu_cache_level__free(&c);
1016
1017                         if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1018                                 goto out;
1019                 }
1020         }
1021  out:
1022         *cntp = cnt;
1023         return 0;
1024 }
1025
1026 #define MAX_CACHES 2000
1027
1028 static int write_cache(int fd, struct perf_header *h __maybe_unused,
1029                           struct perf_evlist *evlist __maybe_unused)
1030 {
1031         struct cpu_cache_level caches[MAX_CACHES];
1032         u32 cnt = 0, i, version = 1;
1033         int ret;
1034
1035         ret = build_caches(caches, MAX_CACHES, &cnt);
1036         if (ret)
1037                 goto out;
1038
1039         qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1040
1041         ret = do_write(fd, &version, sizeof(u32));
1042         if (ret < 0)
1043                 goto out;
1044
1045         ret = do_write(fd, &cnt, sizeof(u32));
1046         if (ret < 0)
1047                 goto out;
1048
1049         for (i = 0; i < cnt; i++) {
1050                 struct cpu_cache_level *c = &caches[i];
1051
1052                 #define _W(v)                                   \
1053                         ret = do_write(fd, &c->v, sizeof(u32)); \
1054                         if (ret < 0)                            \
1055                                 goto out;
1056
1057                 _W(level)
1058                 _W(line_size)
1059                 _W(sets)
1060                 _W(ways)
1061                 #undef _W
1062
1063                 #define _W(v)                                           \
1064                         ret = do_write_string(fd, (const char *) c->v); \
1065                         if (ret < 0)                                    \
1066                                 goto out;
1067
1068                 _W(type)
1069                 _W(size)
1070                 _W(map)
1071                 #undef _W
1072         }
1073
1074 out:
1075         for (i = 0; i < cnt; i++)
1076                 cpu_cache_level__free(&caches[i]);
1077         return ret;
1078 }
1079
1080 static int write_stat(int fd __maybe_unused,
1081                       struct perf_header *h __maybe_unused,
1082                       struct perf_evlist *evlist __maybe_unused)
1083 {
1084         return 0;
1085 }
1086
1087 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1088                            FILE *fp)
1089 {
1090         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1091 }
1092
1093 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1094                             FILE *fp)
1095 {
1096         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1097 }
1098
1099 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1100 {
1101         fprintf(fp, "# arch : %s\n", ph->env.arch);
1102 }
1103
1104 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1105                           FILE *fp)
1106 {
1107         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1108 }
1109
1110 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1111                          FILE *fp)
1112 {
1113         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1114         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1115 }
1116
1117 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1118                           FILE *fp)
1119 {
1120         fprintf(fp, "# perf version : %s\n", ph->env.version);
1121 }
1122
1123 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1124                           FILE *fp)
1125 {
1126         int nr, i;
1127
1128         nr = ph->env.nr_cmdline;
1129
1130         fprintf(fp, "# cmdline : ");
1131
1132         for (i = 0; i < nr; i++)
1133                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1134         fputc('\n', fp);
1135 }
1136
1137 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1138                                FILE *fp)
1139 {
1140         int nr, i;
1141         char *str;
1142         int cpu_nr = ph->env.nr_cpus_avail;
1143
1144         nr = ph->env.nr_sibling_cores;
1145         str = ph->env.sibling_cores;
1146
1147         for (i = 0; i < nr; i++) {
1148                 fprintf(fp, "# sibling cores   : %s\n", str);
1149                 str += strlen(str) + 1;
1150         }
1151
1152         nr = ph->env.nr_sibling_threads;
1153         str = ph->env.sibling_threads;
1154
1155         for (i = 0; i < nr; i++) {
1156                 fprintf(fp, "# sibling threads : %s\n", str);
1157                 str += strlen(str) + 1;
1158         }
1159
1160         if (ph->env.cpu != NULL) {
1161                 for (i = 0; i < cpu_nr; i++)
1162                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1163                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1164         } else
1165                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1166 }
1167
1168 static void free_event_desc(struct perf_evsel *events)
1169 {
1170         struct perf_evsel *evsel;
1171
1172         if (!events)
1173                 return;
1174
1175         for (evsel = events; evsel->attr.size; evsel++) {
1176                 zfree(&evsel->name);
1177                 zfree(&evsel->id);
1178         }
1179
1180         free(events);
1181 }
1182
1183 static struct perf_evsel *
1184 read_event_desc(struct perf_header *ph, int fd)
1185 {
1186         struct perf_evsel *evsel, *events = NULL;
1187         u64 *id;
1188         void *buf = NULL;
1189         u32 nre, sz, nr, i, j;
1190         ssize_t ret;
1191         size_t msz;
1192
1193         /* number of events */
1194         ret = readn(fd, &nre, sizeof(nre));
1195         if (ret != (ssize_t)sizeof(nre))
1196                 goto error;
1197
1198         if (ph->needs_swap)
1199                 nre = bswap_32(nre);
1200
1201         ret = readn(fd, &sz, sizeof(sz));
1202         if (ret != (ssize_t)sizeof(sz))
1203                 goto error;
1204
1205         if (ph->needs_swap)
1206                 sz = bswap_32(sz);
1207
1208         /* buffer to hold on file attr struct */
1209         buf = malloc(sz);
1210         if (!buf)
1211                 goto error;
1212
1213         /* the last event terminates with evsel->attr.size == 0: */
1214         events = calloc(nre + 1, sizeof(*events));
1215         if (!events)
1216                 goto error;
1217
1218         msz = sizeof(evsel->attr);
1219         if (sz < msz)
1220                 msz = sz;
1221
1222         for (i = 0, evsel = events; i < nre; evsel++, i++) {
1223                 evsel->idx = i;
1224
1225                 /*
1226                  * must read entire on-file attr struct to
1227                  * sync up with layout.
1228                  */
1229                 ret = readn(fd, buf, sz);
1230                 if (ret != (ssize_t)sz)
1231                         goto error;
1232
1233                 if (ph->needs_swap)
1234                         perf_event__attr_swap(buf);
1235
1236                 memcpy(&evsel->attr, buf, msz);
1237
1238                 ret = readn(fd, &nr, sizeof(nr));
1239                 if (ret != (ssize_t)sizeof(nr))
1240                         goto error;
1241
1242                 if (ph->needs_swap) {
1243                         nr = bswap_32(nr);
1244                         evsel->needs_swap = true;
1245                 }
1246
1247                 evsel->name = do_read_string(fd, ph);
1248
1249                 if (!nr)
1250                         continue;
1251
1252                 id = calloc(nr, sizeof(*id));
1253                 if (!id)
1254                         goto error;
1255                 evsel->ids = nr;
1256                 evsel->id = id;
1257
1258                 for (j = 0 ; j < nr; j++) {
1259                         ret = readn(fd, id, sizeof(*id));
1260                         if (ret != (ssize_t)sizeof(*id))
1261                                 goto error;
1262                         if (ph->needs_swap)
1263                                 *id = bswap_64(*id);
1264                         id++;
1265                 }
1266         }
1267 out:
1268         free(buf);
1269         return events;
1270 error:
1271         free_event_desc(events);
1272         events = NULL;
1273         goto out;
1274 }
1275
1276 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1277                                 void *priv __attribute__((unused)))
1278 {
1279         return fprintf(fp, ", %s = %s", name, val);
1280 }
1281
1282 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1283 {
1284         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1285         u32 j;
1286         u64 *id;
1287
1288         if (!events) {
1289                 fprintf(fp, "# event desc: not available or unable to read\n");
1290                 return;
1291         }
1292
1293         for (evsel = events; evsel->attr.size; evsel++) {
1294                 fprintf(fp, "# event : name = %s, ", evsel->name);
1295
1296                 if (evsel->ids) {
1297                         fprintf(fp, ", id = {");
1298                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1299                                 if (j)
1300                                         fputc(',', fp);
1301                                 fprintf(fp, " %"PRIu64, *id);
1302                         }
1303                         fprintf(fp, " }");
1304                 }
1305
1306                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1307
1308                 fputc('\n', fp);
1309         }
1310
1311         free_event_desc(events);
1312 }
1313
1314 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1315                             FILE *fp)
1316 {
1317         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1318 }
1319
1320 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1321                                 FILE *fp)
1322 {
1323         int i;
1324         struct numa_node *n;
1325
1326         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1327                 n = &ph->env.numa_nodes[i];
1328
1329                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1330                             " free = %"PRIu64" kB\n",
1331                         n->node, n->mem_total, n->mem_free);
1332
1333                 fprintf(fp, "# node%u cpu list : ", n->node);
1334                 cpu_map__fprintf(n->map, fp);
1335         }
1336 }
1337
1338 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1339 {
1340         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1341 }
1342
1343 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1344                                int fd __maybe_unused, FILE *fp)
1345 {
1346         fprintf(fp, "# contains samples with branch stack\n");
1347 }
1348
1349 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1350                            int fd __maybe_unused, FILE *fp)
1351 {
1352         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1353 }
1354
1355 static void print_stat(struct perf_header *ph __maybe_unused,
1356                        int fd __maybe_unused, FILE *fp)
1357 {
1358         fprintf(fp, "# contains stat data\n");
1359 }
1360
1361 static void print_cache(struct perf_header *ph __maybe_unused,
1362                         int fd __maybe_unused, FILE *fp __maybe_unused)
1363 {
1364         int i;
1365
1366         fprintf(fp, "# CPU cache info:\n");
1367         for (i = 0; i < ph->env.caches_cnt; i++) {
1368                 fprintf(fp, "#  ");
1369                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1370         }
1371 }
1372
1373 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1374                                FILE *fp)
1375 {
1376         const char *delimiter = "# pmu mappings: ";
1377         char *str, *tmp;
1378         u32 pmu_num;
1379         u32 type;
1380
1381         pmu_num = ph->env.nr_pmu_mappings;
1382         if (!pmu_num) {
1383                 fprintf(fp, "# pmu mappings: not available\n");
1384                 return;
1385         }
1386
1387         str = ph->env.pmu_mappings;
1388
1389         while (pmu_num) {
1390                 type = strtoul(str, &tmp, 0);
1391                 if (*tmp != ':')
1392                         goto error;
1393
1394                 str = tmp + 1;
1395                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1396
1397                 delimiter = ", ";
1398                 str += strlen(str) + 1;
1399                 pmu_num--;
1400         }
1401
1402         fprintf(fp, "\n");
1403
1404         if (!pmu_num)
1405                 return;
1406 error:
1407         fprintf(fp, "# pmu mappings: unable to read\n");
1408 }
1409
1410 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1411                              FILE *fp)
1412 {
1413         struct perf_session *session;
1414         struct perf_evsel *evsel;
1415         u32 nr = 0;
1416
1417         session = container_of(ph, struct perf_session, header);
1418
1419         evlist__for_each_entry(session->evlist, evsel) {
1420                 if (perf_evsel__is_group_leader(evsel) &&
1421                     evsel->nr_members > 1) {
1422                         fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1423                                 perf_evsel__name(evsel));
1424
1425                         nr = evsel->nr_members - 1;
1426                 } else if (nr) {
1427                         fprintf(fp, ",%s", perf_evsel__name(evsel));
1428
1429                         if (--nr == 0)
1430                                 fprintf(fp, "}\n");
1431                 }
1432         }
1433 }
1434
1435 static int __event_process_build_id(struct build_id_event *bev,
1436                                     char *filename,
1437                                     struct perf_session *session)
1438 {
1439         int err = -1;
1440         struct machine *machine;
1441         u16 cpumode;
1442         struct dso *dso;
1443         enum dso_kernel_type dso_type;
1444
1445         machine = perf_session__findnew_machine(session, bev->pid);
1446         if (!machine)
1447                 goto out;
1448
1449         cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1450
1451         switch (cpumode) {
1452         case PERF_RECORD_MISC_KERNEL:
1453                 dso_type = DSO_TYPE_KERNEL;
1454                 break;
1455         case PERF_RECORD_MISC_GUEST_KERNEL:
1456                 dso_type = DSO_TYPE_GUEST_KERNEL;
1457                 break;
1458         case PERF_RECORD_MISC_USER:
1459         case PERF_RECORD_MISC_GUEST_USER:
1460                 dso_type = DSO_TYPE_USER;
1461                 break;
1462         default:
1463                 goto out;
1464         }
1465
1466         dso = machine__findnew_dso(machine, filename);
1467         if (dso != NULL) {
1468                 char sbuild_id[SBUILD_ID_SIZE];
1469
1470                 dso__set_build_id(dso, &bev->build_id);
1471
1472                 if (dso_type != DSO_TYPE_USER) {
1473                         struct kmod_path m = { .name = NULL, };
1474
1475                         if (!kmod_path__parse_name(&m, filename) && m.kmod)
1476                                 dso__set_module_info(dso, &m, machine);
1477                         else
1478                                 dso->kernel = dso_type;
1479
1480                         free(m.name);
1481                 }
1482
1483                 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1484                                   sbuild_id);
1485                 pr_debug("build id event received for %s: %s\n",
1486                          dso->long_name, sbuild_id);
1487                 dso__put(dso);
1488         }
1489
1490         err = 0;
1491 out:
1492         return err;
1493 }
1494
1495 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1496                                                  int input, u64 offset, u64 size)
1497 {
1498         struct perf_session *session = container_of(header, struct perf_session, header);
1499         struct {
1500                 struct perf_event_header   header;
1501                 u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1502                 char                       filename[0];
1503         } old_bev;
1504         struct build_id_event bev;
1505         char filename[PATH_MAX];
1506         u64 limit = offset + size;
1507
1508         while (offset < limit) {
1509                 ssize_t len;
1510
1511                 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1512                         return -1;
1513
1514                 if (header->needs_swap)
1515                         perf_event_header__bswap(&old_bev.header);
1516
1517                 len = old_bev.header.size - sizeof(old_bev);
1518                 if (readn(input, filename, len) != len)
1519                         return -1;
1520
1521                 bev.header = old_bev.header;
1522
1523                 /*
1524                  * As the pid is the missing value, we need to fill
1525                  * it properly. The header.misc value give us nice hint.
1526                  */
1527                 bev.pid = HOST_KERNEL_ID;
1528                 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1529                     bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1530                         bev.pid = DEFAULT_GUEST_KERNEL_ID;
1531
1532                 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1533                 __event_process_build_id(&bev, filename, session);
1534
1535                 offset += bev.header.size;
1536         }
1537
1538         return 0;
1539 }
1540
1541 static int perf_header__read_build_ids(struct perf_header *header,
1542                                        int input, u64 offset, u64 size)
1543 {
1544         struct perf_session *session = container_of(header, struct perf_session, header);
1545         struct build_id_event bev;
1546         char filename[PATH_MAX];
1547         u64 limit = offset + size, orig_offset = offset;
1548         int err = -1;
1549
1550         while (offset < limit) {
1551                 ssize_t len;
1552
1553                 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1554                         goto out;
1555
1556                 if (header->needs_swap)
1557                         perf_event_header__bswap(&bev.header);
1558
1559                 len = bev.header.size - sizeof(bev);
1560                 if (readn(input, filename, len) != len)
1561                         goto out;
1562                 /*
1563                  * The a1645ce1 changeset:
1564                  *
1565                  * "perf: 'perf kvm' tool for monitoring guest performance from host"
1566                  *
1567                  * Added a field to struct build_id_event that broke the file
1568                  * format.
1569                  *
1570                  * Since the kernel build-id is the first entry, process the
1571                  * table using the old format if the well known
1572                  * '[kernel.kallsyms]' string for the kernel build-id has the
1573                  * first 4 characters chopped off (where the pid_t sits).
1574                  */
1575                 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1576                         if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1577                                 return -1;
1578                         return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1579                 }
1580
1581                 __event_process_build_id(&bev, filename, session);
1582
1583                 offset += bev.header.size;
1584         }
1585         err = 0;
1586 out:
1587         return err;
1588 }
1589
1590 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1591                                 struct perf_header *ph __maybe_unused,
1592                                 int fd, void *data)
1593 {
1594         ssize_t ret = trace_report(fd, data, false);
1595         return ret < 0 ? -1 : 0;
1596 }
1597
1598 static int process_build_id(struct perf_file_section *section,
1599                             struct perf_header *ph, int fd,
1600                             void *data __maybe_unused)
1601 {
1602         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1603                 pr_debug("Failed to read buildids, continuing...\n");
1604         return 0;
1605 }
1606
1607 static int process_hostname(struct perf_file_section *section __maybe_unused,
1608                             struct perf_header *ph, int fd,
1609                             void *data __maybe_unused)
1610 {
1611         ph->env.hostname = do_read_string(fd, ph);
1612         return ph->env.hostname ? 0 : -ENOMEM;
1613 }
1614
1615 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1616                              struct perf_header *ph, int fd,
1617                              void *data __maybe_unused)
1618 {
1619         ph->env.os_release = do_read_string(fd, ph);
1620         return ph->env.os_release ? 0 : -ENOMEM;
1621 }
1622
1623 static int process_version(struct perf_file_section *section __maybe_unused,
1624                            struct perf_header *ph, int fd,
1625                            void *data __maybe_unused)
1626 {
1627         ph->env.version = do_read_string(fd, ph);
1628         return ph->env.version ? 0 : -ENOMEM;
1629 }
1630
1631 static int process_arch(struct perf_file_section *section __maybe_unused,
1632                         struct perf_header *ph, int fd,
1633                         void *data __maybe_unused)
1634 {
1635         ph->env.arch = do_read_string(fd, ph);
1636         return ph->env.arch ? 0 : -ENOMEM;
1637 }
1638
1639 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1640                           struct perf_header *ph, int fd,
1641                           void *data __maybe_unused)
1642 {
1643         ssize_t ret;
1644         u32 nr;
1645
1646         ret = readn(fd, &nr, sizeof(nr));
1647         if (ret != sizeof(nr))
1648                 return -1;
1649
1650         if (ph->needs_swap)
1651                 nr = bswap_32(nr);
1652
1653         ph->env.nr_cpus_avail = nr;
1654
1655         ret = readn(fd, &nr, sizeof(nr));
1656         if (ret != sizeof(nr))
1657                 return -1;
1658
1659         if (ph->needs_swap)
1660                 nr = bswap_32(nr);
1661
1662         ph->env.nr_cpus_online = nr;
1663         return 0;
1664 }
1665
1666 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1667                            struct perf_header *ph, int fd,
1668                            void *data __maybe_unused)
1669 {
1670         ph->env.cpu_desc = do_read_string(fd, ph);
1671         return ph->env.cpu_desc ? 0 : -ENOMEM;
1672 }
1673
1674 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1675                          struct perf_header *ph,  int fd,
1676                          void *data __maybe_unused)
1677 {
1678         ph->env.cpuid = do_read_string(fd, ph);
1679         return ph->env.cpuid ? 0 : -ENOMEM;
1680 }
1681
1682 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1683                              struct perf_header *ph, int fd,
1684                              void *data __maybe_unused)
1685 {
1686         uint64_t mem;
1687         ssize_t ret;
1688
1689         ret = readn(fd, &mem, sizeof(mem));
1690         if (ret != sizeof(mem))
1691                 return -1;
1692
1693         if (ph->needs_swap)
1694                 mem = bswap_64(mem);
1695
1696         ph->env.total_mem = mem;
1697         return 0;
1698 }
1699
1700 static struct perf_evsel *
1701 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1702 {
1703         struct perf_evsel *evsel;
1704
1705         evlist__for_each_entry(evlist, evsel) {
1706                 if (evsel->idx == idx)
1707                         return evsel;
1708         }
1709
1710         return NULL;
1711 }
1712
1713 static void
1714 perf_evlist__set_event_name(struct perf_evlist *evlist,
1715                             struct perf_evsel *event)
1716 {
1717         struct perf_evsel *evsel;
1718
1719         if (!event->name)
1720                 return;
1721
1722         evsel = perf_evlist__find_by_index(evlist, event->idx);
1723         if (!evsel)
1724                 return;
1725
1726         if (evsel->name)
1727                 return;
1728
1729         evsel->name = strdup(event->name);
1730 }
1731
1732 static int
1733 process_event_desc(struct perf_file_section *section __maybe_unused,
1734                    struct perf_header *header, int fd,
1735                    void *data __maybe_unused)
1736 {
1737         struct perf_session *session;
1738         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1739
1740         if (!events)
1741                 return 0;
1742
1743         session = container_of(header, struct perf_session, header);
1744         for (evsel = events; evsel->attr.size; evsel++)
1745                 perf_evlist__set_event_name(session->evlist, evsel);
1746
1747         free_event_desc(events);
1748
1749         return 0;
1750 }
1751
1752 static int process_cmdline(struct perf_file_section *section,
1753                            struct perf_header *ph, int fd,
1754                            void *data __maybe_unused)
1755 {
1756         ssize_t ret;
1757         char *str, *cmdline = NULL, **argv = NULL;
1758         u32 nr, i, len = 0;
1759
1760         ret = readn(fd, &nr, sizeof(nr));
1761         if (ret != sizeof(nr))
1762                 return -1;
1763
1764         if (ph->needs_swap)
1765                 nr = bswap_32(nr);
1766
1767         ph->env.nr_cmdline = nr;
1768
1769         cmdline = zalloc(section->size + nr + 1);
1770         if (!cmdline)
1771                 return -1;
1772
1773         argv = zalloc(sizeof(char *) * (nr + 1));
1774         if (!argv)
1775                 goto error;
1776
1777         for (i = 0; i < nr; i++) {
1778                 str = do_read_string(fd, ph);
1779                 if (!str)
1780                         goto error;
1781
1782                 argv[i] = cmdline + len;
1783                 memcpy(argv[i], str, strlen(str) + 1);
1784                 len += strlen(str) + 1;
1785                 free(str);
1786         }
1787         ph->env.cmdline = cmdline;
1788         ph->env.cmdline_argv = (const char **) argv;
1789         return 0;
1790
1791 error:
1792         free(argv);
1793         free(cmdline);
1794         return -1;
1795 }
1796
1797 static int process_cpu_topology(struct perf_file_section *section,
1798                                 struct perf_header *ph, int fd,
1799                                 void *data __maybe_unused)
1800 {
1801         ssize_t ret;
1802         u32 nr, i;
1803         char *str;
1804         struct strbuf sb;
1805         int cpu_nr = ph->env.nr_cpus_avail;
1806         u64 size = 0;
1807
1808         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1809         if (!ph->env.cpu)
1810                 return -1;
1811
1812         ret = readn(fd, &nr, sizeof(nr));
1813         if (ret != sizeof(nr))
1814                 goto free_cpu;
1815
1816         if (ph->needs_swap)
1817                 nr = bswap_32(nr);
1818
1819         ph->env.nr_sibling_cores = nr;
1820         size += sizeof(u32);
1821         if (strbuf_init(&sb, 128) < 0)
1822                 goto free_cpu;
1823
1824         for (i = 0; i < nr; i++) {
1825                 str = do_read_string(fd, ph);
1826                 if (!str)
1827                         goto error;
1828
1829                 /* include a NULL character at the end */
1830                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1831                         goto error;
1832                 size += string_size(str);
1833                 free(str);
1834         }
1835         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1836
1837         ret = readn(fd, &nr, sizeof(nr));
1838         if (ret != sizeof(nr))
1839                 return -1;
1840
1841         if (ph->needs_swap)
1842                 nr = bswap_32(nr);
1843
1844         ph->env.nr_sibling_threads = nr;
1845         size += sizeof(u32);
1846
1847         for (i = 0; i < nr; i++) {
1848                 str = do_read_string(fd, ph);
1849                 if (!str)
1850                         goto error;
1851
1852                 /* include a NULL character at the end */
1853                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1854                         goto error;
1855                 size += string_size(str);
1856                 free(str);
1857         }
1858         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1859
1860         /*
1861          * The header may be from old perf,
1862          * which doesn't include core id and socket id information.
1863          */
1864         if (section->size <= size) {
1865                 zfree(&ph->env.cpu);
1866                 return 0;
1867         }
1868
1869         for (i = 0; i < (u32)cpu_nr; i++) {
1870                 ret = readn(fd, &nr, sizeof(nr));
1871                 if (ret != sizeof(nr))
1872                         goto free_cpu;
1873
1874                 if (ph->needs_swap)
1875                         nr = bswap_32(nr);
1876
1877                 ph->env.cpu[i].core_id = nr;
1878
1879                 ret = readn(fd, &nr, sizeof(nr));
1880                 if (ret != sizeof(nr))
1881                         goto free_cpu;
1882
1883                 if (ph->needs_swap)
1884                         nr = bswap_32(nr);
1885
1886                 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1887                         pr_debug("socket_id number is too big."
1888                                  "You may need to upgrade the perf tool.\n");
1889                         goto free_cpu;
1890                 }
1891
1892                 ph->env.cpu[i].socket_id = nr;
1893         }
1894
1895         return 0;
1896
1897 error:
1898         strbuf_release(&sb);
1899 free_cpu:
1900         zfree(&ph->env.cpu);
1901         return -1;
1902 }
1903
1904 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1905                                  struct perf_header *ph, int fd,
1906                                  void *data __maybe_unused)
1907 {
1908         struct numa_node *nodes, *n;
1909         ssize_t ret;
1910         u32 nr, i;
1911         char *str;
1912
1913         /* nr nodes */
1914         ret = readn(fd, &nr, sizeof(nr));
1915         if (ret != sizeof(nr))
1916                 return -1;
1917
1918         if (ph->needs_swap)
1919                 nr = bswap_32(nr);
1920
1921         nodes = zalloc(sizeof(*nodes) * nr);
1922         if (!nodes)
1923                 return -ENOMEM;
1924
1925         for (i = 0; i < nr; i++) {
1926                 n = &nodes[i];
1927
1928                 /* node number */
1929                 ret = readn(fd, &n->node, sizeof(u32));
1930                 if (ret != sizeof(n->node))
1931                         goto error;
1932
1933                 ret = readn(fd, &n->mem_total, sizeof(u64));
1934                 if (ret != sizeof(u64))
1935                         goto error;
1936
1937                 ret = readn(fd, &n->mem_free, sizeof(u64));
1938                 if (ret != sizeof(u64))
1939                         goto error;
1940
1941                 if (ph->needs_swap) {
1942                         n->node      = bswap_32(n->node);
1943                         n->mem_total = bswap_64(n->mem_total);
1944                         n->mem_free  = bswap_64(n->mem_free);
1945                 }
1946
1947                 str = do_read_string(fd, ph);
1948                 if (!str)
1949                         goto error;
1950
1951                 n->map = cpu_map__new(str);
1952                 if (!n->map)
1953                         goto error;
1954
1955                 free(str);
1956         }
1957         ph->env.nr_numa_nodes = nr;
1958         ph->env.numa_nodes = nodes;
1959         return 0;
1960
1961 error:
1962         free(nodes);
1963         return -1;
1964 }
1965
1966 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1967                                 struct perf_header *ph, int fd,
1968                                 void *data __maybe_unused)
1969 {
1970         ssize_t ret;
1971         char *name;
1972         u32 pmu_num;
1973         u32 type;
1974         struct strbuf sb;
1975
1976         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1977         if (ret != sizeof(pmu_num))
1978                 return -1;
1979
1980         if (ph->needs_swap)
1981                 pmu_num = bswap_32(pmu_num);
1982
1983         if (!pmu_num) {
1984                 pr_debug("pmu mappings not available\n");
1985                 return 0;
1986         }
1987
1988         ph->env.nr_pmu_mappings = pmu_num;
1989         if (strbuf_init(&sb, 128) < 0)
1990                 return -1;
1991
1992         while (pmu_num) {
1993                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1994                         goto error;
1995                 if (ph->needs_swap)
1996                         type = bswap_32(type);
1997
1998                 name = do_read_string(fd, ph);
1999                 if (!name)
2000                         goto error;
2001
2002                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2003                         goto error;
2004                 /* include a NULL character at the end */
2005                 if (strbuf_add(&sb, "", 1) < 0)
2006                         goto error;
2007
2008                 if (!strcmp(name, "msr"))
2009                         ph->env.msr_pmu_type = type;
2010
2011                 free(name);
2012                 pmu_num--;
2013         }
2014         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2015         return 0;
2016
2017 error:
2018         strbuf_release(&sb);
2019         return -1;
2020 }
2021
2022 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2023                               struct perf_header *ph, int fd,
2024                               void *data __maybe_unused)
2025 {
2026         size_t ret = -1;
2027         u32 i, nr, nr_groups;
2028         struct perf_session *session;
2029         struct perf_evsel *evsel, *leader = NULL;
2030         struct group_desc {
2031                 char *name;
2032                 u32 leader_idx;
2033                 u32 nr_members;
2034         } *desc;
2035
2036         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2037                 return -1;
2038
2039         if (ph->needs_swap)
2040                 nr_groups = bswap_32(nr_groups);
2041
2042         ph->env.nr_groups = nr_groups;
2043         if (!nr_groups) {
2044                 pr_debug("group desc not available\n");
2045                 return 0;
2046         }
2047
2048         desc = calloc(nr_groups, sizeof(*desc));
2049         if (!desc)
2050                 return -1;
2051
2052         for (i = 0; i < nr_groups; i++) {
2053                 desc[i].name = do_read_string(fd, ph);
2054                 if (!desc[i].name)
2055                         goto out_free;
2056
2057                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2058                         goto out_free;
2059
2060                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2061                         goto out_free;
2062
2063                 if (ph->needs_swap) {
2064                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2065                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2066                 }
2067         }
2068
2069         /*
2070          * Rebuild group relationship based on the group_desc
2071          */
2072         session = container_of(ph, struct perf_session, header);
2073         session->evlist->nr_groups = nr_groups;
2074
2075         i = nr = 0;
2076         evlist__for_each_entry(session->evlist, evsel) {
2077                 if (evsel->idx == (int) desc[i].leader_idx) {
2078                         evsel->leader = evsel;
2079                         /* {anon_group} is a dummy name */
2080                         if (strcmp(desc[i].name, "{anon_group}")) {
2081                                 evsel->group_name = desc[i].name;
2082                                 desc[i].name = NULL;
2083                         }
2084                         evsel->nr_members = desc[i].nr_members;
2085
2086                         if (i >= nr_groups || nr > 0) {
2087                                 pr_debug("invalid group desc\n");
2088                                 goto out_free;
2089                         }
2090
2091                         leader = evsel;
2092                         nr = evsel->nr_members - 1;
2093                         i++;
2094                 } else if (nr) {
2095                         /* This is a group member */
2096                         evsel->leader = leader;
2097
2098                         nr--;
2099                 }
2100         }
2101
2102         if (i != nr_groups || nr != 0) {
2103                 pr_debug("invalid group desc\n");
2104                 goto out_free;
2105         }
2106
2107         ret = 0;
2108 out_free:
2109         for (i = 0; i < nr_groups; i++)
2110                 zfree(&desc[i].name);
2111         free(desc);
2112
2113         return ret;
2114 }
2115
2116 static int process_auxtrace(struct perf_file_section *section,
2117                             struct perf_header *ph, int fd,
2118                             void *data __maybe_unused)
2119 {
2120         struct perf_session *session;
2121         int err;
2122
2123         session = container_of(ph, struct perf_session, header);
2124
2125         err = auxtrace_index__process(fd, section->size, session,
2126                                       ph->needs_swap);
2127         if (err < 0)
2128                 pr_err("Failed to process auxtrace index\n");
2129         return err;
2130 }
2131
2132 static int process_cache(struct perf_file_section *section __maybe_unused,
2133                          struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2134                          void *data __maybe_unused)
2135 {
2136         struct cpu_cache_level *caches;
2137         u32 cnt, i, version;
2138
2139         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2140                 return -1;
2141
2142         if (ph->needs_swap)
2143                 version = bswap_32(version);
2144
2145         if (version != 1)
2146                 return -1;
2147
2148         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2149                 return -1;
2150
2151         if (ph->needs_swap)
2152                 cnt = bswap_32(cnt);
2153
2154         caches = zalloc(sizeof(*caches) * cnt);
2155         if (!caches)
2156                 return -1;
2157
2158         for (i = 0; i < cnt; i++) {
2159                 struct cpu_cache_level c;
2160
2161                 #define _R(v)                                           \
2162                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2163                                 goto out_free_caches;                   \
2164                         if (ph->needs_swap)                             \
2165                                 c.v = bswap_32(c.v);                    \
2166
2167                 _R(level)
2168                 _R(line_size)
2169                 _R(sets)
2170                 _R(ways)
2171                 #undef _R
2172
2173                 #define _R(v)                           \
2174                         c.v = do_read_string(fd, ph);   \
2175                         if (!c.v)                       \
2176                                 goto out_free_caches;
2177
2178                 _R(type)
2179                 _R(size)
2180                 _R(map)
2181                 #undef _R
2182
2183                 caches[i] = c;
2184         }
2185
2186         ph->env.caches = caches;
2187         ph->env.caches_cnt = cnt;
2188         return 0;
2189 out_free_caches:
2190         free(caches);
2191         return -1;
2192 }
2193
2194 struct feature_ops {
2195         int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2196         void (*print)(struct perf_header *h, int fd, FILE *fp);
2197         int (*process)(struct perf_file_section *section,
2198                        struct perf_header *h, int fd, void *data);
2199         const char *name;
2200         bool full_only;
2201 };
2202
2203 #define FEAT_OPA(n, func) \
2204         [n] = { .name = #n, .write = write_##func, .print = print_##func }
2205 #define FEAT_OPP(n, func) \
2206         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2207                 .process = process_##func }
2208 #define FEAT_OPF(n, func) \
2209         [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2210                 .process = process_##func, .full_only = true }
2211
2212 /* feature_ops not implemented: */
2213 #define print_tracing_data      NULL
2214 #define print_build_id          NULL
2215
2216 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2217         FEAT_OPP(HEADER_TRACING_DATA,   tracing_data),
2218         FEAT_OPP(HEADER_BUILD_ID,       build_id),
2219         FEAT_OPP(HEADER_HOSTNAME,       hostname),
2220         FEAT_OPP(HEADER_OSRELEASE,      osrelease),
2221         FEAT_OPP(HEADER_VERSION,        version),
2222         FEAT_OPP(HEADER_ARCH,           arch),
2223         FEAT_OPP(HEADER_NRCPUS,         nrcpus),
2224         FEAT_OPP(HEADER_CPUDESC,        cpudesc),
2225         FEAT_OPP(HEADER_CPUID,          cpuid),
2226         FEAT_OPP(HEADER_TOTAL_MEM,      total_mem),
2227         FEAT_OPP(HEADER_EVENT_DESC,     event_desc),
2228         FEAT_OPP(HEADER_CMDLINE,        cmdline),
2229         FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
2230         FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
2231         FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
2232         FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
2233         FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
2234         FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
2235         FEAT_OPA(HEADER_STAT,           stat),
2236         FEAT_OPF(HEADER_CACHE,          cache),
2237 };
2238
2239 struct header_print_data {
2240         FILE *fp;
2241         bool full; /* extended list of headers */
2242 };
2243
2244 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2245                                            struct perf_header *ph,
2246                                            int feat, int fd, void *data)
2247 {
2248         struct header_print_data *hd = data;
2249
2250         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2251                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2252                                 "%d, continuing...\n", section->offset, feat);
2253                 return 0;
2254         }
2255         if (feat >= HEADER_LAST_FEATURE) {
2256                 pr_warning("unknown feature %d\n", feat);
2257                 return 0;
2258         }
2259         if (!feat_ops[feat].print)
2260                 return 0;
2261
2262         if (!feat_ops[feat].full_only || hd->full)
2263                 feat_ops[feat].print(ph, fd, hd->fp);
2264         else
2265                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2266                         feat_ops[feat].name);
2267
2268         return 0;
2269 }
2270
2271 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2272 {
2273         struct header_print_data hd;
2274         struct perf_header *header = &session->header;
2275         int fd = perf_data_file__fd(session->file);
2276         struct stat st;
2277         int ret, bit;
2278
2279         hd.fp = fp;
2280         hd.full = full;
2281
2282         ret = fstat(fd, &st);
2283         if (ret == -1)
2284                 return -1;
2285
2286         fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2287
2288         perf_header__process_sections(header, fd, &hd,
2289                                       perf_file_section__fprintf_info);
2290
2291         if (session->file->is_pipe)
2292                 return 0;
2293
2294         fprintf(fp, "# missing features: ");
2295         for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2296                 if (bit)
2297                         fprintf(fp, "%s ", feat_ops[bit].name);
2298         }
2299
2300         fprintf(fp, "\n");
2301         return 0;
2302 }
2303
2304 static int do_write_feat(int fd, struct perf_header *h, int type,
2305                          struct perf_file_section **p,
2306                          struct perf_evlist *evlist)
2307 {
2308         int err;
2309         int ret = 0;
2310
2311         if (perf_header__has_feat(h, type)) {
2312                 if (!feat_ops[type].write)
2313                         return -1;
2314
2315                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2316
2317                 err = feat_ops[type].write(fd, h, evlist);
2318                 if (err < 0) {
2319                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2320
2321                         /* undo anything written */
2322                         lseek(fd, (*p)->offset, SEEK_SET);
2323
2324                         return -1;
2325                 }
2326                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2327                 (*p)++;
2328         }
2329         return ret;
2330 }
2331
2332 static int perf_header__adds_write(struct perf_header *header,
2333                                    struct perf_evlist *evlist, int fd)
2334 {
2335         int nr_sections;
2336         struct perf_file_section *feat_sec, *p;
2337         int sec_size;
2338         u64 sec_start;
2339         int feat;
2340         int err;
2341
2342         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2343         if (!nr_sections)
2344                 return 0;
2345
2346         feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
2347         if (feat_sec == NULL)
2348                 return -ENOMEM;
2349
2350         sec_size = sizeof(*feat_sec) * nr_sections;
2351
2352         sec_start = header->feat_offset;
2353         lseek(fd, sec_start + sec_size, SEEK_SET);
2354
2355         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2356                 if (do_write_feat(fd, header, feat, &p, evlist))
2357                         perf_header__clear_feat(header, feat);
2358         }
2359
2360         lseek(fd, sec_start, SEEK_SET);
2361         /*
2362          * may write more than needed due to dropped feature, but
2363          * this is okay, reader will skip the mising entries
2364          */
2365         err = do_write(fd, feat_sec, sec_size);
2366         if (err < 0)
2367                 pr_debug("failed to write feature section\n");
2368         free(feat_sec);
2369         return err;
2370 }
2371
2372 int perf_header__write_pipe(int fd)
2373 {
2374         struct perf_pipe_file_header f_header;
2375         int err;
2376
2377         f_header = (struct perf_pipe_file_header){
2378                 .magic     = PERF_MAGIC,
2379                 .size      = sizeof(f_header),
2380         };
2381
2382         err = do_write(fd, &f_header, sizeof(f_header));
2383         if (err < 0) {
2384                 pr_debug("failed to write perf pipe header\n");
2385                 return err;
2386         }
2387
2388         return 0;
2389 }
2390
2391 int perf_session__write_header(struct perf_session *session,
2392                                struct perf_evlist *evlist,
2393                                int fd, bool at_exit)
2394 {
2395         struct perf_file_header f_header;
2396         struct perf_file_attr   f_attr;
2397         struct perf_header *header = &session->header;
2398         struct perf_evsel *evsel;
2399         u64 attr_offset;
2400         int err;
2401
2402         lseek(fd, sizeof(f_header), SEEK_SET);
2403
2404         evlist__for_each_entry(session->evlist, evsel) {
2405                 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2406                 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
2407                 if (err < 0) {
2408                         pr_debug("failed to write perf header\n");
2409                         return err;
2410                 }
2411         }
2412
2413         attr_offset = lseek(fd, 0, SEEK_CUR);
2414
2415         evlist__for_each_entry(evlist, evsel) {
2416                 f_attr = (struct perf_file_attr){
2417                         .attr = evsel->attr,
2418                         .ids  = {
2419                                 .offset = evsel->id_offset,
2420                                 .size   = evsel->ids * sizeof(u64),
2421                         }
2422                 };
2423                 err = do_write(fd, &f_attr, sizeof(f_attr));
2424                 if (err < 0) {
2425                         pr_debug("failed to write perf header attribute\n");
2426                         return err;
2427                 }
2428         }
2429
2430         if (!header->data_offset)
2431                 header->data_offset = lseek(fd, 0, SEEK_CUR);
2432         header->feat_offset = header->data_offset + header->data_size;
2433
2434         if (at_exit) {
2435                 err = perf_header__adds_write(header, evlist, fd);
2436                 if (err < 0)
2437                         return err;
2438         }
2439
2440         f_header = (struct perf_file_header){
2441                 .magic     = PERF_MAGIC,
2442                 .size      = sizeof(f_header),
2443                 .attr_size = sizeof(f_attr),
2444                 .attrs = {
2445                         .offset = attr_offset,
2446                         .size   = evlist->nr_entries * sizeof(f_attr),
2447                 },
2448                 .data = {
2449                         .offset = header->data_offset,
2450                         .size   = header->data_size,
2451                 },
2452                 /* event_types is ignored, store zeros */
2453         };
2454
2455         memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
2456
2457         lseek(fd, 0, SEEK_SET);
2458         err = do_write(fd, &f_header, sizeof(f_header));
2459         if (err < 0) {
2460                 pr_debug("failed to write perf header\n");
2461                 return err;
2462         }
2463         lseek(fd, header->data_offset + header->data_size, SEEK_SET);
2464
2465         return 0;
2466 }
2467
2468 static int perf_header__getbuffer64(struct perf_header *header,
2469                                     int fd, void *buf, size_t size)
2470 {
2471         if (readn(fd, buf, size) <= 0)
2472                 return -1;
2473
2474         if (header->needs_swap)
2475                 mem_bswap_64(buf, size);
2476
2477         return 0;
2478 }
2479
2480 int perf_header__process_sections(struct perf_header *header, int fd,
2481                                   void *data,
2482                                   int (*process)(struct perf_file_section *section,
2483                                                  struct perf_header *ph,
2484                                                  int feat, int fd, void *data))
2485 {
2486         struct perf_file_section *feat_sec, *sec;
2487         int nr_sections;
2488         int sec_size;
2489         int feat;
2490         int err;
2491
2492         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
2493         if (!nr_sections)
2494                 return 0;
2495
2496         feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
2497         if (!feat_sec)
2498                 return -1;
2499
2500         sec_size = sizeof(*feat_sec) * nr_sections;
2501
2502         lseek(fd, header->feat_offset, SEEK_SET);
2503
2504         err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2505         if (err < 0)
2506                 goto out_free;
2507
2508         for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2509                 err = process(sec++, header, feat, fd, data);
2510                 if (err < 0)
2511                         goto out_free;
2512         }
2513         err = 0;
2514 out_free:
2515         free(feat_sec);
2516         return err;
2517 }
2518
2519 static const int attr_file_abi_sizes[] = {
2520         [0] = PERF_ATTR_SIZE_VER0,
2521         [1] = PERF_ATTR_SIZE_VER1,
2522         [2] = PERF_ATTR_SIZE_VER2,
2523         [3] = PERF_ATTR_SIZE_VER3,
2524         [4] = PERF_ATTR_SIZE_VER4,
2525         0,
2526 };
2527
2528 /*
2529  * In the legacy file format, the magic number is not used to encode endianness.
2530  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2531  * on ABI revisions, we need to try all combinations for all endianness to
2532  * detect the endianness.
2533  */
2534 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2535 {
2536         uint64_t ref_size, attr_size;
2537         int i;
2538
2539         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2540                 ref_size = attr_file_abi_sizes[i]
2541                          + sizeof(struct perf_file_section);
2542                 if (hdr_sz != ref_size) {
2543                         attr_size = bswap_64(hdr_sz);
2544                         if (attr_size != ref_size)
2545                                 continue;
2546
2547                         ph->needs_swap = true;
2548                 }
2549                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2550                          i,
2551                          ph->needs_swap);
2552                 return 0;
2553         }
2554         /* could not determine endianness */
2555         return -1;
2556 }
2557
2558 #define PERF_PIPE_HDR_VER0      16
2559
2560 static const size_t attr_pipe_abi_sizes[] = {
2561         [0] = PERF_PIPE_HDR_VER0,
2562         0,
2563 };
2564
2565 /*
2566  * In the legacy pipe format, there is an implicit assumption that endiannesss
2567  * between host recording the samples, and host parsing the samples is the
2568  * same. This is not always the case given that the pipe output may always be
2569  * redirected into a file and analyzed on a different machine with possibly a
2570  * different endianness and perf_event ABI revsions in the perf tool itself.
2571  */
2572 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2573 {
2574         u64 attr_size;
2575         int i;
2576
2577         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2578                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2579                         attr_size = bswap_64(hdr_sz);
2580                         if (attr_size != hdr_sz)
2581                                 continue;
2582
2583                         ph->needs_swap = true;
2584                 }
2585                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2586                 return 0;
2587         }
2588         return -1;
2589 }
2590
2591 bool is_perf_magic(u64 magic)
2592 {
2593         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2594                 || magic == __perf_magic2
2595                 || magic == __perf_magic2_sw)
2596                 return true;
2597
2598         return false;
2599 }
2600
2601 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2602                               bool is_pipe, struct perf_header *ph)
2603 {
2604         int ret;
2605
2606         /* check for legacy format */
2607         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2608         if (ret == 0) {
2609                 ph->version = PERF_HEADER_VERSION_1;
2610                 pr_debug("legacy perf.data format\n");
2611                 if (is_pipe)
2612                         return try_all_pipe_abis(hdr_sz, ph);
2613
2614                 return try_all_file_abis(hdr_sz, ph);
2615         }
2616         /*
2617          * the new magic number serves two purposes:
2618          * - unique number to identify actual perf.data files
2619          * - encode endianness of file
2620          */
2621         ph->version = PERF_HEADER_VERSION_2;
2622
2623         /* check magic number with one endianness */
2624         if (magic == __perf_magic2)
2625                 return 0;
2626
2627         /* check magic number with opposite endianness */
2628         if (magic != __perf_magic2_sw)
2629                 return -1;
2630
2631         ph->needs_swap = true;
2632
2633         return 0;
2634 }
2635
2636 int perf_file_header__read(struct perf_file_header *header,
2637                            struct perf_header *ph, int fd)
2638 {
2639         ssize_t ret;
2640
2641         lseek(fd, 0, SEEK_SET);
2642
2643         ret = readn(fd, header, sizeof(*header));
2644         if (ret <= 0)
2645                 return -1;
2646
2647         if (check_magic_endian(header->magic,
2648                                header->attr_size, false, ph) < 0) {
2649                 pr_debug("magic/endian check failed\n");
2650                 return -1;
2651         }
2652
2653         if (ph->needs_swap) {
2654                 mem_bswap_64(header, offsetof(struct perf_file_header,
2655                              adds_features));
2656         }
2657
2658         if (header->size != sizeof(*header)) {
2659                 /* Support the previous format */
2660                 if (header->size == offsetof(typeof(*header), adds_features))
2661                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2662                 else
2663                         return -1;
2664         } else if (ph->needs_swap) {
2665                 /*
2666                  * feature bitmap is declared as an array of unsigned longs --
2667                  * not good since its size can differ between the host that
2668                  * generated the data file and the host analyzing the file.
2669                  *
2670                  * We need to handle endianness, but we don't know the size of
2671                  * the unsigned long where the file was generated. Take a best
2672                  * guess at determining it: try 64-bit swap first (ie., file
2673                  * created on a 64-bit host), and check if the hostname feature
2674                  * bit is set (this feature bit is forced on as of fbe96f2).
2675                  * If the bit is not, undo the 64-bit swap and try a 32-bit
2676                  * swap. If the hostname bit is still not set (e.g., older data
2677                  * file), punt and fallback to the original behavior --
2678                  * clearing all feature bits and setting buildid.
2679                  */
2680                 mem_bswap_64(&header->adds_features,
2681                             BITS_TO_U64(HEADER_FEAT_BITS));
2682
2683                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2684                         /* unswap as u64 */
2685                         mem_bswap_64(&header->adds_features,
2686                                     BITS_TO_U64(HEADER_FEAT_BITS));
2687
2688                         /* unswap as u32 */
2689                         mem_bswap_32(&header->adds_features,
2690                                     BITS_TO_U32(HEADER_FEAT_BITS));
2691                 }
2692
2693                 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2694                         bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2695                         set_bit(HEADER_BUILD_ID, header->adds_features);
2696                 }
2697         }
2698
2699         memcpy(&ph->adds_features, &header->adds_features,
2700                sizeof(ph->adds_features));
2701
2702         ph->data_offset  = header->data.offset;
2703         ph->data_size    = header->data.size;
2704         ph->feat_offset  = header->data.offset + header->data.size;
2705         return 0;
2706 }
2707
2708 static int perf_file_section__process(struct perf_file_section *section,
2709                                       struct perf_header *ph,
2710                                       int feat, int fd, void *data)
2711 {
2712         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2713                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2714                           "%d, continuing...\n", section->offset, feat);
2715                 return 0;
2716         }
2717
2718         if (feat >= HEADER_LAST_FEATURE) {
2719                 pr_debug("unknown feature %d, continuing...\n", feat);
2720                 return 0;
2721         }
2722
2723         if (!feat_ops[feat].process)
2724                 return 0;
2725
2726         return feat_ops[feat].process(section, ph, fd, data);
2727 }
2728
2729 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2730                                        struct perf_header *ph, int fd,
2731                                        bool repipe)
2732 {
2733         ssize_t ret;
2734
2735         ret = readn(fd, header, sizeof(*header));
2736         if (ret <= 0)
2737                 return -1;
2738
2739         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2740                 pr_debug("endian/magic failed\n");
2741                 return -1;
2742         }
2743
2744         if (ph->needs_swap)
2745                 header->size = bswap_64(header->size);
2746
2747         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2748                 return -1;
2749
2750         return 0;
2751 }
2752
2753 static int perf_header__read_pipe(struct perf_session *session)
2754 {
2755         struct perf_header *header = &session->header;
2756         struct perf_pipe_file_header f_header;
2757
2758         if (perf_file_header__read_pipe(&f_header, header,
2759                                         perf_data_file__fd(session->file),
2760                                         session->repipe) < 0) {
2761                 pr_debug("incompatible file format\n");
2762                 return -EINVAL;
2763         }
2764
2765         return 0;
2766 }
2767
2768 static int read_attr(int fd, struct perf_header *ph,
2769                      struct perf_file_attr *f_attr)
2770 {
2771         struct perf_event_attr *attr = &f_attr->attr;
2772         size_t sz, left;
2773         size_t our_sz = sizeof(f_attr->attr);
2774         ssize_t ret;
2775
2776         memset(f_attr, 0, sizeof(*f_attr));
2777
2778         /* read minimal guaranteed structure */
2779         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2780         if (ret <= 0) {
2781                 pr_debug("cannot read %d bytes of header attr\n",
2782                          PERF_ATTR_SIZE_VER0);
2783                 return -1;
2784         }
2785
2786         /* on file perf_event_attr size */
2787         sz = attr->size;
2788
2789         if (ph->needs_swap)
2790                 sz = bswap_32(sz);
2791
2792         if (sz == 0) {
2793                 /* assume ABI0 */
2794                 sz =  PERF_ATTR_SIZE_VER0;
2795         } else if (sz > our_sz) {
2796                 pr_debug("file uses a more recent and unsupported ABI"
2797                          " (%zu bytes extra)\n", sz - our_sz);
2798                 return -1;
2799         }
2800         /* what we have not yet read and that we know about */
2801         left = sz - PERF_ATTR_SIZE_VER0;
2802         if (left) {
2803                 void *ptr = attr;
2804                 ptr += PERF_ATTR_SIZE_VER0;
2805
2806                 ret = readn(fd, ptr, left);
2807         }
2808         /* read perf_file_section, ids are read in caller */
2809         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2810
2811         return ret <= 0 ? -1 : 0;
2812 }
2813
2814 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2815                                                 struct pevent *pevent)
2816 {
2817         struct event_format *event;
2818         char bf[128];
2819
2820         /* already prepared */
2821         if (evsel->tp_format)
2822                 return 0;
2823
2824         if (pevent == NULL) {
2825                 pr_debug("broken or missing trace data\n");
2826                 return -1;
2827         }
2828
2829         event = pevent_find_event(pevent, evsel->attr.config);
2830         if (event == NULL) {
2831                 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2832                 return -1;
2833         }
2834
2835         if (!evsel->name) {
2836                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2837                 evsel->name = strdup(bf);
2838                 if (evsel->name == NULL)
2839                         return -1;
2840         }
2841
2842         evsel->tp_format = event;
2843         return 0;
2844 }
2845
2846 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2847                                                   struct pevent *pevent)
2848 {
2849         struct perf_evsel *pos;
2850
2851         evlist__for_each_entry(evlist, pos) {
2852                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2853                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2854                         return -1;
2855         }
2856
2857         return 0;
2858 }
2859
2860 int perf_session__read_header(struct perf_session *session)
2861 {
2862         struct perf_data_file *file = session->file;
2863         struct perf_header *header = &session->header;
2864         struct perf_file_header f_header;
2865         struct perf_file_attr   f_attr;
2866         u64                     f_id;
2867         int nr_attrs, nr_ids, i, j;
2868         int fd = perf_data_file__fd(file);
2869
2870         session->evlist = perf_evlist__new();
2871         if (session->evlist == NULL)
2872                 return -ENOMEM;
2873
2874         session->evlist->env = &header->env;
2875         session->machines.host.env = &header->env;
2876         if (perf_data_file__is_pipe(file))
2877                 return perf_header__read_pipe(session);
2878
2879         if (perf_file_header__read(&f_header, header, fd) < 0)
2880                 return -EINVAL;
2881
2882         /*
2883          * Sanity check that perf.data was written cleanly; data size is
2884          * initialized to 0 and updated only if the on_exit function is run.
2885          * If data size is still 0 then the file contains only partial
2886          * information.  Just warn user and process it as much as it can.
2887          */
2888         if (f_header.data.size == 0) {
2889                 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2890                            "Was the 'perf record' command properly terminated?\n",
2891                            file->path);
2892         }
2893
2894         nr_attrs = f_header.attrs.size / f_header.attr_size;
2895         lseek(fd, f_header.attrs.offset, SEEK_SET);
2896
2897         for (i = 0; i < nr_attrs; i++) {
2898                 struct perf_evsel *evsel;
2899                 off_t tmp;
2900
2901                 if (read_attr(fd, header, &f_attr) < 0)
2902                         goto out_errno;
2903
2904                 if (header->needs_swap) {
2905                         f_attr.ids.size   = bswap_64(f_attr.ids.size);
2906                         f_attr.ids.offset = bswap_64(f_attr.ids.offset);
2907                         perf_event__attr_swap(&f_attr.attr);
2908                 }
2909
2910                 tmp = lseek(fd, 0, SEEK_CUR);
2911                 evsel = perf_evsel__new(&f_attr.attr);
2912
2913                 if (evsel == NULL)
2914                         goto out_delete_evlist;
2915
2916                 evsel->needs_swap = header->needs_swap;
2917                 /*
2918                  * Do it before so that if perf_evsel__alloc_id fails, this
2919                  * entry gets purged too at perf_evlist__delete().
2920                  */
2921                 perf_evlist__add(session->evlist, evsel);
2922
2923                 nr_ids = f_attr.ids.size / sizeof(u64);
2924                 /*
2925                  * We don't have the cpu and thread maps on the header, so
2926                  * for allocating the perf_sample_id table we fake 1 cpu and
2927                  * hattr->ids threads.
2928                  */
2929                 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2930                         goto out_delete_evlist;
2931
2932                 lseek(fd, f_attr.ids.offset, SEEK_SET);
2933
2934                 for (j = 0; j < nr_ids; j++) {
2935                         if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
2936                                 goto out_errno;
2937
2938                         perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
2939                 }
2940
2941                 lseek(fd, tmp, SEEK_SET);
2942         }
2943
2944         symbol_conf.nr_events = nr_attrs;
2945
2946         perf_header__process_sections(header, fd, &session->tevent,
2947                                       perf_file_section__process);
2948
2949         if (perf_evlist__prepare_tracepoint_events(session->evlist,
2950                                                    session->tevent.pevent))
2951                 goto out_delete_evlist;
2952
2953         return 0;
2954 out_errno:
2955         return -errno;
2956
2957 out_delete_evlist:
2958         perf_evlist__delete(session->evlist);
2959         session->evlist = NULL;
2960         return -ENOMEM;
2961 }
2962
2963 int perf_event__synthesize_attr(struct perf_tool *tool,
2964                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2965                                 perf_event__handler_t process)
2966 {
2967         union perf_event *ev;
2968         size_t size;
2969         int err;
2970
2971         size = sizeof(struct perf_event_attr);
2972         size = PERF_ALIGN(size, sizeof(u64));
2973         size += sizeof(struct perf_event_header);
2974         size += ids * sizeof(u64);
2975
2976         ev = malloc(size);
2977
2978         if (ev == NULL)
2979                 return -ENOMEM;
2980
2981         ev->attr.attr = *attr;
2982         memcpy(ev->attr.id, id, ids * sizeof(u64));
2983
2984         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2985         ev->attr.header.size = (u16)size;
2986
2987         if (ev->attr.header.size == size)
2988                 err = process(tool, ev, NULL, NULL);
2989         else
2990                 err = -E2BIG;
2991
2992         free(ev);
2993
2994         return err;
2995 }
2996
2997 static struct event_update_event *
2998 event_update_event__new(size_t size, u64 type, u64 id)
2999 {
3000         struct event_update_event *ev;
3001
3002         size += sizeof(*ev);
3003         size  = PERF_ALIGN(size, sizeof(u64));
3004
3005         ev = zalloc(size);
3006         if (ev) {
3007                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3008                 ev->header.size = (u16)size;
3009                 ev->type = type;
3010                 ev->id = id;
3011         }
3012         return ev;
3013 }
3014
3015 int
3016 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3017                                          struct perf_evsel *evsel,
3018                                          perf_event__handler_t process)
3019 {
3020         struct event_update_event *ev;
3021         size_t size = strlen(evsel->unit);
3022         int err;
3023
3024         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3025         if (ev == NULL)
3026                 return -ENOMEM;
3027
3028         strncpy(ev->data, evsel->unit, size);
3029         err = process(tool, (union perf_event *)ev, NULL, NULL);
3030         free(ev);
3031         return err;
3032 }
3033
3034 int
3035 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3036                                           struct perf_evsel *evsel,
3037                                           perf_event__handler_t process)
3038 {
3039         struct event_update_event *ev;
3040         struct event_update_event_scale *ev_data;
3041         int err;
3042
3043         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3044         if (ev == NULL)
3045                 return -ENOMEM;
3046
3047         ev_data = (struct event_update_event_scale *) ev->data;
3048         ev_data->scale = evsel->scale;
3049         err = process(tool, (union perf_event*) ev, NULL, NULL);
3050         free(ev);
3051         return err;
3052 }
3053
3054 int
3055 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3056                                          struct perf_evsel *evsel,
3057                                          perf_event__handler_t process)
3058 {
3059         struct event_update_event *ev;
3060         size_t len = strlen(evsel->name);
3061         int err;
3062
3063         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3064         if (ev == NULL)
3065                 return -ENOMEM;
3066
3067         strncpy(ev->data, evsel->name, len);
3068         err = process(tool, (union perf_event*) ev, NULL, NULL);
3069         free(ev);
3070         return err;
3071 }
3072
3073 int
3074 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3075                                         struct perf_evsel *evsel,
3076                                         perf_event__handler_t process)
3077 {
3078         size_t size = sizeof(struct event_update_event);
3079         struct event_update_event *ev;
3080         int max, err;
3081         u16 type;
3082
3083         if (!evsel->own_cpus)
3084                 return 0;
3085
3086         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3087         if (!ev)
3088                 return -ENOMEM;
3089
3090         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3091         ev->header.size = (u16)size;
3092         ev->type = PERF_EVENT_UPDATE__CPUS;
3093         ev->id   = evsel->id[0];
3094
3095         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3096                                  evsel->own_cpus,
3097                                  type, max);
3098
3099         err = process(tool, (union perf_event*) ev, NULL, NULL);
3100         free(ev);
3101         return err;
3102 }
3103
3104 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3105 {
3106         struct event_update_event *ev = &event->event_update;
3107         struct event_update_event_scale *ev_scale;
3108         struct event_update_event_cpus *ev_cpus;
3109         struct cpu_map *map;
3110         size_t ret;
3111
3112         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3113
3114         switch (ev->type) {
3115         case PERF_EVENT_UPDATE__SCALE:
3116                 ev_scale = (struct event_update_event_scale *) ev->data;
3117                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3118                 break;
3119         case PERF_EVENT_UPDATE__UNIT:
3120                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3121                 break;
3122         case PERF_EVENT_UPDATE__NAME:
3123                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3124                 break;
3125         case PERF_EVENT_UPDATE__CPUS:
3126                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3127                 ret += fprintf(fp, "... ");
3128
3129                 map = cpu_map__new_data(&ev_cpus->cpus);
3130                 if (map)
3131                         ret += cpu_map__fprintf(map, fp);
3132                 else
3133                         ret += fprintf(fp, "failed to get cpus\n");
3134                 break;
3135         default:
3136                 ret += fprintf(fp, "... unknown type\n");
3137                 break;
3138         }
3139
3140         return ret;
3141 }
3142
3143 int perf_event__synthesize_attrs(struct perf_tool *tool,
3144                                    struct perf_session *session,
3145                                    perf_event__handler_t process)
3146 {
3147         struct perf_evsel *evsel;
3148         int err = 0;
3149
3150         evlist__for_each_entry(session->evlist, evsel) {
3151                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3152                                                   evsel->id, process);
3153                 if (err) {
3154                         pr_debug("failed to create perf header attribute\n");
3155                         return err;
3156                 }
3157         }
3158
3159         return err;
3160 }
3161
3162 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3163                              union perf_event *event,
3164                              struct perf_evlist **pevlist)
3165 {
3166         u32 i, ids, n_ids;
3167         struct perf_evsel *evsel;
3168         struct perf_evlist *evlist = *pevlist;
3169
3170         if (evlist == NULL) {
3171                 *pevlist = evlist = perf_evlist__new();
3172                 if (evlist == NULL)
3173                         return -ENOMEM;
3174         }
3175
3176         evsel = perf_evsel__new(&event->attr.attr);
3177         if (evsel == NULL)
3178                 return -ENOMEM;
3179
3180         perf_evlist__add(evlist, evsel);
3181
3182         ids = event->header.size;
3183         ids -= (void *)&event->attr.id - (void *)event;
3184         n_ids = ids / sizeof(u64);
3185         /*
3186          * We don't have the cpu and thread maps on the header, so
3187          * for allocating the perf_sample_id table we fake 1 cpu and
3188          * hattr->ids threads.
3189          */
3190         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3191                 return -ENOMEM;
3192
3193         for (i = 0; i < n_ids; i++) {
3194                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3195         }
3196
3197         symbol_conf.nr_events = evlist->nr_entries;
3198
3199         return 0;
3200 }
3201
3202 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3203                                      union perf_event *event,
3204                                      struct perf_evlist **pevlist)
3205 {
3206         struct event_update_event *ev = &event->event_update;
3207         struct event_update_event_scale *ev_scale;
3208         struct event_update_event_cpus *ev_cpus;
3209         struct perf_evlist *evlist;
3210         struct perf_evsel *evsel;
3211         struct cpu_map *map;
3212
3213         if (!pevlist || *pevlist == NULL)
3214                 return -EINVAL;
3215
3216         evlist = *pevlist;
3217
3218         evsel = perf_evlist__id2evsel(evlist, ev->id);
3219         if (evsel == NULL)
3220                 return -EINVAL;
3221
3222         switch (ev->type) {
3223         case PERF_EVENT_UPDATE__UNIT:
3224                 evsel->unit = strdup(ev->data);
3225                 break;
3226         case PERF_EVENT_UPDATE__NAME:
3227                 evsel->name = strdup(ev->data);
3228                 break;
3229         case PERF_EVENT_UPDATE__SCALE:
3230                 ev_scale = (struct event_update_event_scale *) ev->data;
3231                 evsel->scale = ev_scale->scale;
3232                 break;
3233         case PERF_EVENT_UPDATE__CPUS:
3234                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3235
3236                 map = cpu_map__new_data(&ev_cpus->cpus);
3237                 if (map)
3238                         evsel->own_cpus = map;
3239                 else
3240                         pr_err("failed to get event_update cpus\n");
3241         default:
3242                 break;
3243         }
3244
3245         return 0;
3246 }
3247
3248 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3249                                         struct perf_evlist *evlist,
3250                                         perf_event__handler_t process)
3251 {
3252         union perf_event ev;
3253         struct tracing_data *tdata;
3254         ssize_t size = 0, aligned_size = 0, padding;
3255         int err __maybe_unused = 0;
3256
3257         /*
3258          * We are going to store the size of the data followed
3259          * by the data contents. Since the fd descriptor is a pipe,
3260          * we cannot seek back to store the size of the data once
3261          * we know it. Instead we:
3262          *
3263          * - write the tracing data to the temp file
3264          * - get/write the data size to pipe
3265          * - write the tracing data from the temp file
3266          *   to the pipe
3267          */
3268         tdata = tracing_data_get(&evlist->entries, fd, true);
3269         if (!tdata)
3270                 return -1;
3271
3272         memset(&ev, 0, sizeof(ev));
3273
3274         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3275         size = tdata->size;
3276         aligned_size = PERF_ALIGN(size, sizeof(u64));
3277         padding = aligned_size - size;
3278         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3279         ev.tracing_data.size = aligned_size;
3280
3281         process(tool, &ev, NULL, NULL);
3282
3283         /*
3284          * The put function will copy all the tracing data
3285          * stored in temp file to the pipe.
3286          */
3287         tracing_data_put(tdata);
3288
3289         write_padded(fd, NULL, 0, padding);
3290
3291         return aligned_size;
3292 }
3293
3294 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3295                                      union perf_event *event,
3296                                      struct perf_session *session)
3297 {
3298         ssize_t size_read, padding, size = event->tracing_data.size;
3299         int fd = perf_data_file__fd(session->file);
3300         off_t offset = lseek(fd, 0, SEEK_CUR);
3301         char buf[BUFSIZ];
3302
3303         /* setup for reading amidst mmap */
3304         lseek(fd, offset + sizeof(struct tracing_data_event),
3305               SEEK_SET);
3306
3307         size_read = trace_report(fd, &session->tevent,
3308                                  session->repipe);
3309         padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
3310
3311         if (readn(fd, buf, padding) < 0) {
3312                 pr_err("%s: reading input file", __func__);
3313                 return -1;
3314         }
3315         if (session->repipe) {
3316                 int retw = write(STDOUT_FILENO, buf, padding);
3317                 if (retw <= 0 || retw != padding) {
3318                         pr_err("%s: repiping tracing data padding", __func__);
3319                         return -1;
3320                 }
3321         }
3322
3323         if (size_read + padding != size) {
3324                 pr_err("%s: tracing data size mismatch", __func__);
3325                 return -1;
3326         }
3327
3328         perf_evlist__prepare_tracepoint_events(session->evlist,
3329                                                session->tevent.pevent);
3330
3331         return size_read + padding;
3332 }
3333
3334 int perf_event__synthesize_build_id(struct perf_tool *tool,
3335                                     struct dso *pos, u16 misc,
3336                                     perf_event__handler_t process,
3337                                     struct machine *machine)
3338 {
3339         union perf_event ev;
3340         size_t len;
3341         int err = 0;
3342
3343         if (!pos->hit)
3344                 return err;
3345
3346         memset(&ev, 0, sizeof(ev));
3347
3348         len = pos->long_name_len + 1;
3349         len = PERF_ALIGN(len, NAME_ALIGN);
3350         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3351         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3352         ev.build_id.header.misc = misc;
3353         ev.build_id.pid = machine->pid;
3354         ev.build_id.header.size = sizeof(ev.build_id) + len;
3355         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3356
3357         err = process(tool, &ev, NULL, machine);
3358
3359         return err;
3360 }
3361
3362 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3363                                  union perf_event *event,
3364                                  struct perf_session *session)
3365 {
3366         __event_process_build_id(&event->build_id,
3367                                  event->build_id.filename,
3368                                  session);
3369         return 0;
3370 }