]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/builtin-record.c
Merge tag 'perf-core-for-mingo-20160419' of git://git.kernel.org/pub/scm/linux/kernel...
[karo-tx-linux.git] / tools / perf / builtin-record.c
1 /*
2  * builtin-record.c
3  *
4  * Builtin record command: Record the profile of a workload
5  * (or a CPU, or a PID) into the perf.data output file - for
6  * later analysis via perf report.
7  */
8 #include "builtin.h"
9
10 #include "perf.h"
11
12 #include "util/build-id.h"
13 #include "util/util.h"
14 #include <subcmd/parse-options.h>
15 #include "util/parse-events.h"
16
17 #include "util/callchain.h"
18 #include "util/cgroup.h"
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/session.h"
25 #include "util/tool.h"
26 #include "util/symbol.h"
27 #include "util/cpumap.h"
28 #include "util/thread_map.h"
29 #include "util/data.h"
30 #include "util/perf_regs.h"
31 #include "util/auxtrace.h"
32 #include "util/tsc.h"
33 #include "util/parse-branch-options.h"
34 #include "util/parse-regs-options.h"
35 #include "util/llvm-utils.h"
36 #include "util/bpf-loader.h"
37 #include "asm/bug.h"
38
39 #include <unistd.h>
40 #include <sched.h>
41 #include <sys/mman.h>
42
43
/*
 * State for one 'perf record' session: the output file, the event list,
 * session bookkeeping, and the build-id / output-rotation knobs parsed
 * from the command line.
 */
struct record {
	struct perf_tool	tool;		/* event-processing callbacks */
	struct record_opts	opts;		/* parsed command-line options */
	u64			bytes_written;	/* payload bytes written so far */
	struct perf_data_file	file;		/* perf.data output file */
	struct auxtrace_record	*itr;		/* AUX area tracing state */
	struct perf_evlist	*evlist;	/* events being recorded */
	struct perf_session	*session;
	const char		*progname;	/* argv[0], for diagnostics */
	int			realtime_prio;	/* nonzero: SCHED_FIFO at this prio */
	bool			no_buildid;	/* skip build-id processing */
	bool			no_buildid_set;	/* --no-buildid given explicitly */
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;	/* mark all DSOs regardless of hits */
	bool			timestamp_filename; /* rotate output with timestamps */
	unsigned long long	samples;	/* mmap reads seen this round */
};
62
63 static int record__write(struct record *rec, void *bf, size_t size)
64 {
65         if (perf_data_file__write(rec->session->file, bf, size) < 0) {
66                 pr_err("failed to write perf data, error: %m\n");
67                 return -1;
68         }
69
70         rec->bytes_written += size;
71         return 0;
72 }
73
74 static int process_synthesized_event(struct perf_tool *tool,
75                                      union perf_event *event,
76                                      struct perf_sample *sample __maybe_unused,
77                                      struct machine *machine __maybe_unused)
78 {
79         struct record *rec = container_of(tool, struct record, tool);
80         return record__write(rec, event, event->header.size);
81 }
82
/*
 * Drain the idx'th mmap ring buffer into the output file.
 *
 * The kernel advances the ring's head as it produces samples; md->prev
 * tracks the last position we consumed.  Because the buffer is a ring,
 * the unread region [old, head) may wrap past the end of the mapping,
 * in which case it is written out in two chunks.
 *
 * Returns 0 on success (including nothing to read), -1 on write failure.
 */
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	/* the data area starts one page past the control page */
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;	/* nothing new since last sweep */

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		/* region wraps: flush the tail end of the ring first */
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	/* flush the (remaining) contiguous region up to head */
	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	/* publish the new consumer position back to the kernel */
	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}
125
/* Set from signal context: main-loop exit, deferred fatal signal, child reaped. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

/*
 * AUX area snapshot-mode state machine.  OFF means snapshot mode is not
 * in use at all; DISABLED/ENABLED gate whether a SIGUSR2 snapshot
 * request is currently honoured.
 */
static volatile enum {
	AUXTRACE_SNAPSHOT_OFF = -1,
	AUXTRACE_SNAPSHOT_DISABLED = 0,
	AUXTRACE_SNAPSHOT_ENABLED = 1,
} auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_OFF;
135
136 static inline void
137 auxtrace_snapshot_on(void)
138 {
139         auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_DISABLED;
140 }
141
142 static inline void
143 auxtrace_snapshot_enable(void)
144 {
145         if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
146                 return;
147         auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_ENABLED;
148 }
149
150 static inline void
151 auxtrace_snapshot_disable(void)
152 {
153         if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
154                 return;
155         auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_DISABLED;
156 }
157
158 static inline bool
159 auxtrace_snapshot_is_enabled(void)
160 {
161         if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
162                 return false;
163         return auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_ENABLED;
164 }
165
/* Result of the last snapshot read, and a SIGUSR2-set "snapshot pending" flag. */
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;
168
169 static void sig_handler(int sig)
170 {
171         if (sig == SIGCHLD)
172                 child_finished = 1;
173         else
174                 signr = sig;
175
176         done = 1;
177 }
178
179 static void record__sig_exit(void)
180 {
181         if (signr == -1)
182                 return;
183
184         signal(signr, SIG_DFL);
185         raise(signr);
186 }
187
188 #ifdef HAVE_AUXTRACE_SUPPORT
189
190 static int record__process_auxtrace(struct perf_tool *tool,
191                                     union perf_event *event, void *data1,
192                                     size_t len1, void *data2, size_t len2)
193 {
194         struct record *rec = container_of(tool, struct record, tool);
195         struct perf_data_file *file = &rec->file;
196         size_t padding;
197         u8 pad[8] = {0};
198
199         if (!perf_data_file__is_pipe(file)) {
200                 off_t file_offset;
201                 int fd = perf_data_file__fd(file);
202                 int err;
203
204                 file_offset = lseek(fd, 0, SEEK_CUR);
205                 if (file_offset == -1)
206                         return -1;
207                 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
208                                                      event, file_offset);
209                 if (err)
210                         return err;
211         }
212
213         /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
214         padding = (len1 + len2) & 7;
215         if (padding)
216                 padding = 8 - padding;
217
218         record__write(rec, event, event->header.size);
219         record__write(rec, data1, len1);
220         if (len2)
221                 record__write(rec, data2, len2);
222         record__write(rec, &pad, padding);
223
224         return 0;
225 }
226
227 static int record__auxtrace_mmap_read(struct record *rec,
228                                       struct auxtrace_mmap *mm)
229 {
230         int ret;
231
232         ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
233                                   record__process_auxtrace);
234         if (ret < 0)
235                 return ret;
236
237         if (ret)
238                 rec->samples++;
239
240         return 0;
241 }
242
243 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
244                                                struct auxtrace_mmap *mm)
245 {
246         int ret;
247
248         ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
249                                            record__process_auxtrace,
250                                            rec->opts.auxtrace_snapshot_size);
251         if (ret < 0)
252                 return ret;
253
254         if (ret)
255                 rec->samples++;
256
257         return 0;
258 }
259
260 static int record__auxtrace_read_snapshot_all(struct record *rec)
261 {
262         int i;
263         int rc = 0;
264
265         for (i = 0; i < rec->evlist->nr_mmaps; i++) {
266                 struct auxtrace_mmap *mm =
267                                 &rec->evlist->mmap[i].auxtrace_mmap;
268
269                 if (!mm->base)
270                         continue;
271
272                 if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
273                         rc = -1;
274                         goto out;
275                 }
276         }
277 out:
278         return rc;
279 }
280
281 static void record__read_auxtrace_snapshot(struct record *rec)
282 {
283         pr_debug("Recording AUX area tracing snapshot\n");
284         if (record__auxtrace_read_snapshot_all(rec) < 0) {
285                 auxtrace_snapshot_err = -1;
286         } else {
287                 auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
288                 if (!auxtrace_snapshot_err)
289                         auxtrace_snapshot_enable();
290         }
291 }
292
293 #else
294
/* !HAVE_AUXTRACE_SUPPORT stub: AUX area tracing compiled out, nothing to read. */
static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}
301
/* !HAVE_AUXTRACE_SUPPORT stub: snapshot requests are silently ignored. */
static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}
306
/* !HAVE_AUXTRACE_SUPPORT stub: starting a snapshot trivially succeeds. */
static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}
312
313 #endif
314
/*
 * Open all events on their CPUs/threads, apply event filters and mmap
 * the ring buffers.  When an open fails in a recoverable way the event
 * is softened via perf_evsel__fallback() and retried.  On success the
 * evlist is attached to the session and the id header size is updated.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			/* NOTE(review): assumes errno from the failed open is
			 * still intact after perf_evsel__fallback() — verify. */
			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		/* on failure, pos points at the evsel whose filter failed */
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			/* mlock limit exceeded: give actionable advice */
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			if (errno)
				rc = -errno;
			else
				rc = -EINVAL;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
378
379 static int process_sample_event(struct perf_tool *tool,
380                                 union perf_event *event,
381                                 struct perf_sample *sample,
382                                 struct perf_evsel *evsel,
383                                 struct machine *machine)
384 {
385         struct record *rec = container_of(tool, struct record, tool);
386
387         rec->samples++;
388
389         return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
390 }
391
392 static int process_buildids(struct record *rec)
393 {
394         struct perf_data_file *file  = &rec->file;
395         struct perf_session *session = rec->session;
396
397         if (file->size == 0)
398                 return 0;
399
400         /*
401          * During this process, it'll load kernel map and replace the
402          * dso->long_name to a real pathname it found.  In this case
403          * we prefer the vmlinux path like
404          *   /lib/modules/3.16.4/build/vmlinux
405          *
406          * rather than build-id path (in debug directory).
407          *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
408          */
409         symbol_conf.ignore_vmlinux_buildid = true;
410
411         /*
412          * If --buildid-all is given, it marks all DSO regardless of hits,
413          * so no need to process samples.
414          */
415         if (rec->buildid_all)
416                 rec->tool.sample = NULL;
417
418         return perf_session__process_events(session);
419 }
420
421 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
422 {
423         int err;
424         struct perf_tool *tool = data;
425         /*
426          *As for guest kernel when processing subcommand record&report,
427          *we arrange module mmap prior to guest kernel mmap and trigger
428          *a preload dso because default guest module symbols are loaded
429          *from guest kallsyms instead of /lib/modules/XXX/XXX. This
430          *method is used to avoid symbol missing when the first addr is
431          *in module instead of in guest kernel.
432          */
433         err = perf_event__synthesize_modules(tool, process_synthesized_event,
434                                              machine);
435         if (err < 0)
436                 pr_err("Couldn't record guest kernel [%d]'s reference"
437                        " relocation symbol.\n", machine->pid);
438
439         /*
440          * We use _stext for guest kernel because guest kernel's /proc/kallsyms
441          * have no _text sometimes.
442          */
443         err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
444                                                  machine);
445         if (err < 0)
446                 pr_err("Couldn't record guest kernel [%d]'s reference"
447                        " relocation symbol.\n", machine->pid);
448 }
449
/*
 * Header-only sentinel flushed after each ring-buffer sweep so the
 * report-side reordering logic knows no earlier events will follow.
 */
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
454
455 static int record__mmap_read_all(struct record *rec)
456 {
457         u64 bytes_written = rec->bytes_written;
458         int i;
459         int rc = 0;
460
461         for (i = 0; i < rec->evlist->nr_mmaps; i++) {
462                 struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
463
464                 if (rec->evlist->mmap[i].base) {
465                         if (record__mmap_read(rec, i) != 0) {
466                                 rc = -1;
467                                 goto out;
468                         }
469                 }
470
471                 if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
472                     record__auxtrace_mmap_read(rec, mm) != 0) {
473                         rc = -1;
474                         goto out;
475                 }
476         }
477
478         /*
479          * Mark the round finished in case we wrote
480          * at least one event.
481          */
482         if (bytes_written != rec->bytes_written)
483                 rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
484
485 out:
486         return rc;
487 }
488
489 static void record__init_features(struct record *rec)
490 {
491         struct perf_session *session = rec->session;
492         int feat;
493
494         for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
495                 perf_header__set_feat(&session->header, feat);
496
497         if (rec->no_buildid)
498                 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
499
500         if (!have_tracepoints(&rec->evlist->entries))
501                 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
502
503         if (!rec->opts.branch_stack)
504                 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
505
506         if (!rec->opts.full_auxtrace)
507                 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
508
509         perf_header__clear_feat(&session->header, HEADER_STAT);
510 }
511
512 static void
513 record__finish_output(struct record *rec)
514 {
515         struct perf_data_file *file = &rec->file;
516         int fd = perf_data_file__fd(file);
517
518         if (file->is_pipe)
519                 return;
520
521         rec->session->header.data_size += rec->bytes_written;
522         file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
523
524         if (!rec->no_buildid) {
525                 process_buildids(rec);
526
527                 if (rec->buildid_all)
528                         dsos__hit_all(rec->session);
529         }
530         perf_session__write_header(rec->session, rec->evlist, fd, true);
531
532         return;
533 }
534
535 static int
536 record__switch_output(struct record *rec, bool at_exit)
537 {
538         struct perf_data_file *file = &rec->file;
539         int fd, err;
540
541         /* Same Size:      "2015122520103046"*/
542         char timestamp[] = "InvalidTimestamp";
543
544         rec->samples = 0;
545         record__finish_output(rec);
546         err = fetch_current_timestamp(timestamp, sizeof(timestamp));
547         if (err) {
548                 pr_err("Failed to get current timestamp\n");
549                 return -EINVAL;
550         }
551
552         fd = perf_data_file__switch(file, timestamp,
553                                     rec->session->header.data_offset,
554                                     at_exit);
555         if (fd >= 0 && !at_exit) {
556                 rec->bytes_written = 0;
557                 rec->session->header.data_size = 0;
558         }
559
560         if (!quiet)
561                 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
562                         file->path, timestamp);
563         return fd;
564 }
565
/* errno from a failed exec in the forked workload, set from signal context. */
static volatile int workload_exec_errno;
567
/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* the child smuggles its exec errno in the signal payload */
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
581
582 static void snapshot_sig_handler(int sig);
583
/*
 * Weak default: architectures with a TSC-style clock override this to
 * emit time-conversion information; everywhere else it is a no-op.
 */
int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}
592
/*
 * Emit the synthetic (non-sampled) events describing system state at
 * the start of the session: attrs and tracing data for pipe output,
 * time-conversion info, AUX trace info, kernel/module mmaps, guest
 * machines, and the existing threads of the target.
 *
 * Returns 0 on success or a negative error.
 */
static int record__synthesize(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (file->is_pipe) {
		/* pipes carry no file header, so attrs must go in-band */
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			/* tracing data bypassed record__write; account it here */
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	/* kernel/module synthesis failures are survivable: warn, keep going */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}
665
666 static int __cmd_record(struct record *rec, int argc, const char **argv)
667 {
668         int err;
669         int status = 0;
670         unsigned long waking = 0;
671         const bool forks = argc > 0;
672         struct machine *machine;
673         struct perf_tool *tool = &rec->tool;
674         struct record_opts *opts = &rec->opts;
675         struct perf_data_file *file = &rec->file;
676         struct perf_session *session;
677         bool disabled = false, draining = false;
678         int fd;
679
680         rec->progname = argv[0];
681
682         atexit(record__sig_exit);
683         signal(SIGCHLD, sig_handler);
684         signal(SIGINT, sig_handler);
685         signal(SIGTERM, sig_handler);
686
687         if (rec->opts.auxtrace_snapshot_mode) {
688                 signal(SIGUSR2, snapshot_sig_handler);
689                 auxtrace_snapshot_on();
690         } else {
691                 signal(SIGUSR2, SIG_IGN);
692         }
693
694         session = perf_session__new(file, false, tool);
695         if (session == NULL) {
696                 pr_err("Perf session creation failed.\n");
697                 return -1;
698         }
699
700         fd = perf_data_file__fd(file);
701         rec->session = session;
702
703         record__init_features(rec);
704
705         if (forks) {
706                 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
707                                                     argv, file->is_pipe,
708                                                     workload_exec_failed_signal);
709                 if (err < 0) {
710                         pr_err("Couldn't run the workload!\n");
711                         status = err;
712                         goto out_delete_session;
713                 }
714         }
715
716         if (record__open(rec) != 0) {
717                 err = -1;
718                 goto out_child;
719         }
720
721         err = bpf__apply_obj_config();
722         if (err) {
723                 char errbuf[BUFSIZ];
724
725                 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
726                 pr_err("ERROR: Apply config to BPF failed: %s\n",
727                          errbuf);
728                 goto out_child;
729         }
730
731         /*
732          * Normally perf_session__new would do this, but it doesn't have the
733          * evlist.
734          */
735         if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
736                 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
737                 rec->tool.ordered_events = false;
738         }
739
740         if (!rec->evlist->nr_groups)
741                 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
742
743         if (file->is_pipe) {
744                 err = perf_header__write_pipe(fd);
745                 if (err < 0)
746                         goto out_child;
747         } else {
748                 err = perf_session__write_header(session, rec->evlist, fd, false);
749                 if (err < 0)
750                         goto out_child;
751         }
752
753         if (!rec->no_buildid
754             && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
755                 pr_err("Couldn't generate buildids. "
756                        "Use --no-buildid to profile anyway.\n");
757                 err = -1;
758                 goto out_child;
759         }
760
761         machine = &session->machines.host;
762
763         err = record__synthesize(rec);
764         if (err < 0)
765                 goto out_child;
766
767         if (rec->realtime_prio) {
768                 struct sched_param param;
769
770                 param.sched_priority = rec->realtime_prio;
771                 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
772                         pr_err("Could not set realtime priority.\n");
773                         err = -1;
774                         goto out_child;
775                 }
776         }
777
778         /*
779          * When perf is starting the traced process, all the events
780          * (apart from group members) have enable_on_exec=1 set,
781          * so don't spoil it by prematurely enabling them.
782          */
783         if (!target__none(&opts->target) && !opts->initial_delay)
784                 perf_evlist__enable(rec->evlist);
785
786         /*
787          * Let the child rip
788          */
789         if (forks) {
790                 union perf_event *event;
791
792                 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
793                 if (event == NULL) {
794                         err = -ENOMEM;
795                         goto out_child;
796                 }
797
798                 /*
799                  * Some H/W events are generated before COMM event
800                  * which is emitted during exec(), so perf script
801                  * cannot see a correct process name for those events.
802                  * Synthesize COMM event to prevent it.
803                  */
804                 perf_event__synthesize_comm(tool, event,
805                                             rec->evlist->workload.pid,
806                                             process_synthesized_event,
807                                             machine);
808                 free(event);
809
810                 perf_evlist__start_workload(rec->evlist);
811         }
812
813         if (opts->initial_delay) {
814                 usleep(opts->initial_delay * 1000);
815                 perf_evlist__enable(rec->evlist);
816         }
817
818         auxtrace_snapshot_enable();
819         for (;;) {
820                 unsigned long long hits = rec->samples;
821
822                 if (record__mmap_read_all(rec) < 0) {
823                         auxtrace_snapshot_disable();
824                         err = -1;
825                         goto out_child;
826                 }
827
828                 if (auxtrace_record__snapshot_started) {
829                         auxtrace_record__snapshot_started = 0;
830                         if (!auxtrace_snapshot_err)
831                                 record__read_auxtrace_snapshot(rec);
832                         if (auxtrace_snapshot_err) {
833                                 pr_err("AUX area tracing snapshot failed\n");
834                                 err = -1;
835                                 goto out_child;
836                         }
837                 }
838
839                 if (hits == rec->samples) {
840                         if (done || draining)
841                                 break;
842                         err = perf_evlist__poll(rec->evlist, -1);
843                         /*
844                          * Propagate error, only if there's any. Ignore positive
845                          * number of returned events and interrupt error.
846                          */
847                         if (err > 0 || (err < 0 && errno == EINTR))
848                                 err = 0;
849                         waking++;
850
851                         if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
852                                 draining = true;
853                 }
854
855                 /*
856                  * When perf is starting the traced process, at the end events
857                  * die with the process and we wait for that. Thus no need to
858                  * disable events in this case.
859                  */
860                 if (done && !disabled && !target__none(&opts->target)) {
861                         auxtrace_snapshot_disable();
862                         perf_evlist__disable(rec->evlist);
863                         disabled = true;
864                 }
865         }
866         auxtrace_snapshot_disable();
867
868         if (forks && workload_exec_errno) {
869                 char msg[STRERR_BUFSIZE];
870                 const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
871                 pr_err("Workload failed: %s\n", emsg);
872                 err = -1;
873                 goto out_child;
874         }
875
876         if (!quiet)
877                 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
878
879 out_child:
880         if (forks) {
881                 int exit_status;
882
883                 if (!child_finished)
884                         kill(rec->evlist->workload.pid, SIGTERM);
885
886                 wait(&exit_status);
887
888                 if (err < 0)
889                         status = err;
890                 else if (WIFEXITED(exit_status))
891                         status = WEXITSTATUS(exit_status);
892                 else if (WIFSIGNALED(exit_status))
893                         signr = WTERMSIG(exit_status);
894         } else
895                 status = err;
896
897         /* this will be recalculated during process_buildids() */
898         rec->samples = 0;
899
900         if (!err) {
901                 if (!rec->timestamp_filename) {
902                         record__finish_output(rec);
903                 } else {
904                         fd = record__switch_output(rec, true);
905                         if (fd < 0) {
906                                 status = fd;
907                                 goto out_delete_session;
908                         }
909                 }
910         }
911
912         if (!err && !quiet) {
913                 char samples[128];
914                 const char *postfix = rec->timestamp_filename ?
915                                         ".<timestamp>" : "";
916
917                 if (rec->samples && !rec->opts.full_auxtrace)
918                         scnprintf(samples, sizeof(samples),
919                                   " (%" PRIu64 " samples)", rec->samples);
920                 else
921                         samples[0] = '\0';
922
923                 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
924                         perf_data_file__size(file) / 1024.0 / 1024.0,
925                         file->path, postfix, samples);
926         }
927
928 out_delete_session:
929         perf_session__delete(session);
930         return status;
931 }
932
933 static void callchain_debug(struct callchain_param *callchain)
934 {
935         static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
936
937         pr_debug("callchain: type %s\n", str[callchain->record_mode]);
938
939         if (callchain->record_mode == CALLCHAIN_DWARF)
940                 pr_debug("callchain: stack dump size %d\n",
941                          callchain->dump_size);
942 }
943
944 int record_opts__parse_callchain(struct record_opts *record,
945                                  struct callchain_param *callchain,
946                                  const char *arg, bool unset)
947 {
948         int ret;
949         callchain->enabled = !unset;
950
951         /* --no-call-graph */
952         if (unset) {
953                 callchain->record_mode = CALLCHAIN_NONE;
954                 pr_debug("callchain: disabled\n");
955                 return 0;
956         }
957
958         ret = parse_callchain_record_opt(arg, callchain);
959         if (!ret) {
960                 /* Enable data address sampling for DWARF unwind. */
961                 if (callchain->record_mode == CALLCHAIN_DWARF)
962                         record->sample_address = true;
963                 callchain_debug(callchain);
964         }
965
966         return ret;
967 }
968
969 int record_parse_callchain_opt(const struct option *opt,
970                                const char *arg,
971                                int unset)
972 {
973         return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
974 }
975
976 int record_callchain_opt(const struct option *opt,
977                          const char *arg __maybe_unused,
978                          int unset __maybe_unused)
979 {
980         struct callchain_param *callchain = opt->value;
981
982         callchain->enabled = true;
983
984         if (callchain->record_mode == CALLCHAIN_NONE)
985                 callchain->record_mode = CALLCHAIN_FP;
986
987         callchain_debug(callchain);
988         return 0;
989 }
990
991 static int perf_record_config(const char *var, const char *value, void *cb)
992 {
993         struct record *rec = cb;
994
995         if (!strcmp(var, "record.build-id")) {
996                 if (!strcmp(value, "cache"))
997                         rec->no_buildid_cache = false;
998                 else if (!strcmp(value, "no-cache"))
999                         rec->no_buildid_cache = true;
1000                 else if (!strcmp(value, "skip"))
1001                         rec->no_buildid = true;
1002                 else
1003                         return -1;
1004                 return 0;
1005         }
1006         if (!strcmp(var, "record.call-graph"))
1007                 var = "call-graph.record-mode"; /* fall-through */
1008
1009         return perf_default_config(var, value, cb);
1010 }
1011
/* Maps a user-visible --clockid name to its numeric clockid value. */
struct clockid_map {
	const char *name;
	int clockid;
};

/* Convenience initializer for one clockid_map table entry. */
#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Table terminator: a NULL name ends the clockids[] scan. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif
1035
/* Names accepted by --clockid; scanned by parse_clockid() until CLOCKID_END. */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1054
1055 static int parse_clockid(const struct option *opt, const char *str, int unset)
1056 {
1057         struct record_opts *opts = (struct record_opts *)opt->value;
1058         const struct clockid_map *cm;
1059         const char *ostr = str;
1060
1061         if (unset) {
1062                 opts->use_clockid = 0;
1063                 return 0;
1064         }
1065
1066         /* no arg passed */
1067         if (!str)
1068                 return 0;
1069
1070         /* no setting it twice */
1071         if (opts->use_clockid)
1072                 return -1;
1073
1074         opts->use_clockid = true;
1075
1076         /* if its a number, we're done */
1077         if (sscanf(str, "%d", &opts->clockid) == 1)
1078                 return 0;
1079
1080         /* allow a "CLOCK_" prefix to the name */
1081         if (!strncasecmp(str, "CLOCK_", 6))
1082                 str += 6;
1083
1084         for (cm = clockids; cm->name; cm++) {
1085                 if (!strcasecmp(str, cm->name)) {
1086                         opts->clockid = cm->clockid;
1087                         return 0;
1088                 }
1089         }
1090
1091         opts->use_clockid = false;
1092         ui__warning("unknown clockid %s, check man page\n", ostr);
1093         return -1;
1094 }
1095
1096 static int record__parse_mmap_pages(const struct option *opt,
1097                                     const char *str,
1098                                     int unset __maybe_unused)
1099 {
1100         struct record_opts *opts = opt->value;
1101         char *s, *p;
1102         unsigned int mmap_pages;
1103         int ret;
1104
1105         if (!str)
1106                 return -EINVAL;
1107
1108         s = strdup(str);
1109         if (!s)
1110                 return -ENOMEM;
1111
1112         p = strchr(s, ',');
1113         if (p)
1114                 *p = '\0';
1115
1116         if (*s) {
1117                 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1118                 if (ret)
1119                         goto out_free;
1120                 opts->mmap_pages = mmap_pages;
1121         }
1122
1123         if (!p) {
1124                 ret = 0;
1125                 goto out_free;
1126         }
1127
1128         ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1129         if (ret)
1130                 goto out_free;
1131
1132         opts->auxtrace_mmap_pages = mmap_pages;
1133
1134 out_free:
1135         free(s);
1136         return ret;
1137 }
1138
/* Command-line synopses shown in parse_options() usage output. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
1145
1146 /*
1147  * XXX Ideally would be local to cmd_record() and passed to a record__new
1148  * because we need to have access to it in record__exit, that is called
1149  * after cmd_record() exits, but since record_options need to be accessible to
1150  * builtin-script, leave it here.
1151  *
1152  * At least we don't ouch it in all the other functions here directly.
1153  *
1154  * Just say no to tons of global variables, sigh.
1155  */
/*
 * Global 'perf record' state: option defaults plus the perf_tool callbacks
 * used while processing events.  Global (not local to cmd_record) because
 * builtin-script.c also needs access — see the comment above.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout     = 500,	/* ms, overridable via --proc-map-timeout */
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events = true,
	},
};
1179
/* Help text for --call-graph, extending the shared CALLCHAIN_RECORD_HELP blurb. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";
1182
1183 /*
1184  * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1185  * with it and switch to use the library functions in perf_evlist that came
1186  * from builtin-record.c, i.e. use record_opts,
1187  * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1188  * using pipes, etc.
1189  */
/* All 'perf record' command line options; exported via record_options below. */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	/* Branch stack sampling options. */
	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_END()
};

/* Non-static alias so builtin-script.c can reuse the option table. */
struct option *record_options = __record_options;
1304
1305 int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
1306 {
1307         int err;
1308         struct record *rec = &record;
1309         char errbuf[BUFSIZ];
1310
1311 #ifndef HAVE_LIBBPF_SUPPORT
1312 # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1313         set_nobuild('\0', "clang-path", true);
1314         set_nobuild('\0', "clang-opt", true);
1315 # undef set_nobuild
1316 #endif
1317
1318 #ifndef HAVE_BPF_PROLOGUE
1319 # if !defined (HAVE_DWARF_SUPPORT)
1320 #  define REASON  "NO_DWARF=1"
1321 # elif !defined (HAVE_LIBBPF_SUPPORT)
1322 #  define REASON  "NO_LIBBPF=1"
1323 # else
1324 #  define REASON  "this architecture doesn't support BPF prologue"
1325 # endif
1326 # define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1327         set_nobuild('\0', "vmlinux", true);
1328 # undef set_nobuild
1329 # undef REASON
1330 #endif
1331
1332         rec->evlist = perf_evlist__new();
1333         if (rec->evlist == NULL)
1334                 return -ENOMEM;
1335
1336         perf_config(perf_record_config, rec);
1337
1338         argc = parse_options(argc, argv, record_options, record_usage,
1339                             PARSE_OPT_STOP_AT_NON_OPTION);
1340         if (!argc && target__none(&rec->opts.target))
1341                 usage_with_options(record_usage, record_options);
1342
1343         if (nr_cgroups && !rec->opts.target.system_wide) {
1344                 usage_with_options_msg(record_usage, record_options,
1345                         "cgroup monitoring only available in system-wide mode");
1346
1347         }
1348         if (rec->opts.record_switch_events &&
1349             !perf_can_record_switch_events()) {
1350                 ui__error("kernel does not support recording context switch events\n");
1351                 parse_options_usage(record_usage, record_options, "switch-events", 0);
1352                 return -EINVAL;
1353         }
1354
1355         if (!rec->itr) {
1356                 rec->itr = auxtrace_record__init(rec->evlist, &err);
1357                 if (err)
1358                         return err;
1359         }
1360
1361         err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1362                                               rec->opts.auxtrace_snapshot_opts);
1363         if (err)
1364                 return err;
1365
1366         err = bpf__setup_stdout(rec->evlist);
1367         if (err) {
1368                 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1369                 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1370                          errbuf);
1371                 return err;
1372         }
1373
1374         err = -ENOMEM;
1375
1376         symbol__init(NULL);
1377
1378         if (symbol_conf.kptr_restrict)
1379                 pr_warning(
1380 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1381 "check /proc/sys/kernel/kptr_restrict.\n\n"
1382 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1383 "file is not found in the buildid cache or in the vmlinux path.\n\n"
1384 "Samples in kernel modules won't be resolved at all.\n\n"
1385 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1386 "even with a suitable vmlinux or kallsyms file.\n\n");
1387
1388         if (rec->no_buildid_cache || rec->no_buildid)
1389                 disable_buildid_cache();
1390
1391         if (rec->evlist->nr_entries == 0 &&
1392             perf_evlist__add_default(rec->evlist) < 0) {
1393                 pr_err("Not enough memory for event selector list\n");
1394                 goto out_symbol_exit;
1395         }
1396
1397         if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1398                 rec->opts.no_inherit = true;
1399
1400         err = target__validate(&rec->opts.target);
1401         if (err) {
1402                 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1403                 ui__warning("%s", errbuf);
1404         }
1405
1406         err = target__parse_uid(&rec->opts.target);
1407         if (err) {
1408                 int saved_errno = errno;
1409
1410                 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
1411                 ui__error("%s", errbuf);
1412
1413                 err = -saved_errno;
1414                 goto out_symbol_exit;
1415         }
1416
1417         err = -ENOMEM;
1418         if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
1419                 usage_with_options(record_usage, record_options);
1420
1421         err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1422         if (err)
1423                 goto out_symbol_exit;
1424
1425         /*
1426          * We take all buildids when the file contains
1427          * AUX area tracing data because we do not decode the
1428          * trace because it would take too long.
1429          */
1430         if (rec->opts.full_auxtrace)
1431                 rec->buildid_all = true;
1432
1433         if (record_opts__config(&rec->opts)) {
1434                 err = -EINVAL;
1435                 goto out_symbol_exit;
1436         }
1437
1438         err = __cmd_record(&record, argc, argv);
1439 out_symbol_exit:
1440         perf_evlist__delete(rec->evlist);
1441         symbol__exit();
1442         auxtrace_record__free(rec->itr);
1443         return err;
1444 }
1445
1446 static void snapshot_sig_handler(int sig __maybe_unused)
1447 {
1448         if (!auxtrace_snapshot_is_enabled())
1449                 return;
1450         auxtrace_snapshot_disable();
1451         auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
1452         auxtrace_record__snapshot_started = 1;
1453 }