4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
12 #include "util/build-id.h"
13 #include "util/util.h"
14 #include "util/parse-options.h"
15 #include "util/parse-events.h"
17 #include "util/header.h"
18 #include "util/event.h"
19 #include "util/evlist.h"
20 #include "util/evsel.h"
21 #include "util/debug.h"
22 #include "util/session.h"
23 #include "util/tool.h"
24 #include "util/symbol.h"
25 #include "util/cpumap.h"
26 #include "util/thread_map.h"
27 #include "util/data.h"
33 #ifndef HAVE_ON_EXIT_SUPPORT
/*
 * Fallback on_exit() for libcs that lack it (e.g. Bionic/musl):
 * callbacks are stored in the tables below and dispatched from a single
 * atexit() hook.  The exit() wrapper macro captures the exit status so
 * callbacks can receive it, mimicking glibc's on_exit() semantics.
 * NOTE(review): this listing elides some lines of the original block.
 */
37 static int __on_exit_count = 0;
38 typedef void (*on_exit_func_t) (int, void *);
39 static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
40 static void *__on_exit_args[ATEXIT_MAX];
41 static int __exitcode = 0;
42 static void __handle_on_exit_funcs(void);
43 static int on_exit(on_exit_func_t function, void *arg);
/* Record the status passed to exit() before the real exit() runs. */
44 #define exit(x) (exit)(__exitcode = (x))
/*
 * Register @function/@arg to run at process exit.  The first
 * registration installs __handle_on_exit_funcs() via atexit().
 * NOTE(review): the table-full error return and final return line are
 * elided in this listing — confirm against the full source.
 */
46 static int on_exit(on_exit_func_t function, void *arg)
48 if (__on_exit_count == ATEXIT_MAX)
50 else if (__on_exit_count == 0)
51 atexit(__handle_on_exit_funcs);
52 __on_exit_funcs[__on_exit_count] = function;
53 __on_exit_args[__on_exit_count++] = arg;
/*
 * atexit() trampoline: invoke every registered on_exit() callback in
 * registration order, passing the exit code captured by the exit()
 * wrapper macro and the callback's private argument.
 */
57 static void __handle_on_exit_funcs(void)
60 for (i = 0; i < __on_exit_count; i++)
61 __on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
/*
 * Fields of struct perf_record (the struct's opening line is elided in
 * this listing): per-invocation state for 'perf record'.
 */
66 struct perf_tool tool;	/* callbacks for synthesized events; embedded so container_of() works */
67 struct perf_record_opts opts;	/* parsed command-line recording options */
69 struct perf_data_file file;	/* the perf.data output file */
70 struct perf_evlist *evlist;	/* events being recorded */
71 struct perf_session *session;	/* session owning the output header */
75 bool no_buildid_cache;	/* -N: don't update the buildid cache */
/*
 * Append @size bytes at @bf to the perf.data output via the session's
 * data file and account them in rec->bytes_written.
 * NOTE(review): the error-path return and success return are elided in
 * this listing.
 */
79 static int perf_record__write(struct perf_record *rec, void *bf, size_t size)
81 if (perf_data_file__write(rec->session->file, bf, size) < 0) {
82 pr_err("failed to write perf data, error: %m\n");
86 rec->bytes_written += size;
/*
 * perf_tool callback for synthesized events: recover the enclosing
 * perf_record from the embedded tool pointer and write the raw event
 * (header.size bytes) straight to the output file.
 */
90 static int process_synthesized_event(struct perf_tool *tool,
91 union perf_event *event,
92 struct perf_sample *sample __maybe_unused,
93 struct machine *machine __maybe_unused)
95 struct perf_record *rec = container_of(tool, struct perf_record, tool);
96 return perf_record__write(rec, event, event->header.size);
/*
 * Drain one mmap'd event ring buffer: copy everything between the
 * previously-consumed position (md->prev) and the kernel's current head
 * out to perf.data, then publish the new tail so the kernel can reuse
 * the space.  A wrapped region is written in two chunks.
 * NOTE(review): size computation, error returns and the prev update are
 * elided in this listing.
 */
99 static int perf_record__mmap_read(struct perf_record *rec,
100 struct perf_mmap *md)
102 unsigned int head = perf_mmap__read_head(md);
103 unsigned int old = md->prev;
104 unsigned char *data = md->base + page_size;
/* Buffer wrapped: first write from 'old' up to the end of the ring. */
116 if ((old & md->mask) + size != (head & md->mask)) {
117 buf = &data[old & md->mask];
118 size = md->mask + 1 - (old & md->mask);
121 if (perf_record__write(rec, buf, size) < 0) {
/* ...then the remainder from the start of the ring. */
127 buf = &data[old & md->mask];
131 if (perf_record__write(rec, buf, size) < 0) {
137 perf_mmap__write_tail(md, old);
/* Signal-handler state: written from sig_handler(), read by the main loop. */
143 static volatile int done = 0;
144 static volatile int signr = -1;
145 static volatile int child_finished = 0;
/*
 * Common handler for SIGCHLD/SIGINT/SIGUSR1/SIGTERM.
 * NOTE(review): the body is elided in this listing; presumably it sets
 * the flags above — confirm against the full source.
 */
147 static void sig_handler(int sig)
/*
 * on_exit() callback: if we forked a workload that is still running,
 * terminate it with SIGTERM and report if it died by signal; then, for
 * fatal signals other than SIGUSR1, restore the default disposition
 * (the caller presumably re-raises — lines elided in this listing).
 */
156 static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg)
158 struct perf_record *rec = arg;
161 if (rec->evlist->workload.pid > 0) {
163 kill(rec->evlist->workload.pid, SIGTERM);
166 if (WIFSIGNALED(status))
167 psignal(WTERMSIG(status), rec->progname);
/* Normal exit or SIGUSR1: nothing further to do (early-out elided). */
170 if (signr == -1 || signr == SIGUSR1)
173 signal(signr, SIG_DFL);
/*
 * Configure and open all counters in the evlist, apply event filters,
 * and mmap the per-cpu/per-thread ring buffers.  On perf_evsel__open()
 * failure, try perf_evsel__fallback() once before reporting the error.
 * NOTE(review): many error-handling/return lines are elided in this
 * listing.
 */
176 static int perf_record__open(struct perf_record *rec)
179 struct perf_evsel *pos;
180 struct perf_evlist *evlist = rec->evlist;
181 struct perf_session *session = rec->session;
182 struct perf_record_opts *opts = &rec->opts;
185 perf_evlist__config(evlist, opts);
187 list_for_each_entry(pos, &evlist->entries, node) {
189 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
/* Fallback (e.g. downgrade the event) and retry — retry lines elided. */
190 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
192 ui__warning("%s\n", msg);
197 perf_evsel__open_strerror(pos, &opts->target,
198 errno, msg, sizeof(msg));
199 ui__error("%s\n", msg);
204 if (perf_evlist__apply_filters(evlist)) {
205 error("failed to set filter with %d (%s)\n", errno,
211 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
/* EPERM here is the mlock limit; give an actionable hint. */
212 if (errno == EPERM) {
213 pr_err("Permission error mapping pages.\n"
214 "Consider increasing "
215 "/proc/sys/kernel/perf_event_mlock_kb,\n"
216 "or try again with a smaller value of -m/--mmap_pages.\n"
217 "(current value: %u)\n", opts->mmap_pages);
220 pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
226 session->evlist = evlist;
227 perf_session__set_id_hdr_size(session);
/*
 * Re-read the events just recorded (from the header's data offset up to
 * the current file position) with the build-id ops, marking the DSOs
 * that were actually hit so only their build-ids land in the header.
 */
232 static int process_buildids(struct perf_record *rec)
234 struct perf_data_file *file = &rec->file;
235 struct perf_session *session = rec->session;
236 u64 start = session->header.data_offset;
/* Current offset == end of the data we wrote. */
238 u64 size = lseek(file->fd, 0, SEEK_CUR);
242 return __perf_session__process_events(session, start,
244 size, &build_id__mark_dso_hit_ops);
/*
 * on_exit() callback: for file (non-pipe) output, finalize the perf.data
 * header — account the bytes written, collect build-ids unless disabled,
 * rewrite the header — then tear down the session and evlist.
 * NOTE(review): some argument/cleanup lines are elided in this listing.
 */
247 static void perf_record__exit(int status, void *arg)
249 struct perf_record *rec = arg;
250 struct perf_data_file *file = &rec->file;
255 if (!file->is_pipe) {
256 rec->session->header.data_size += rec->bytes_written;
258 if (!rec->no_buildid)
259 process_buildids(rec);
260 perf_session__write_header(rec->session, rec->evlist,
262 perf_session__delete(rec->session);
263 perf_evlist__delete(rec->evlist);
/*
 * machines__process_guests() callback: synthesize module and kernel
 * mmap events for one guest machine so guest samples can be resolved.
 * NOTE(review): error-check lines between the synthesize calls are
 * elided in this listing.
 */
268 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
271 struct perf_tool *tool = data;
273 *As for guest kernel when processing subcommand record&report,
274 *we arrange module mmap prior to guest kernel mmap and trigger
275 *a preload dso because default guest module symbols are loaded
276 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
277 *method is used to avoid symbol missing when the first addr is
278 *in module instead of in guest kernel.
280 err = perf_event__synthesize_modules(tool, process_synthesized_event,
283 pr_err("Couldn't record guest kernel [%d]'s reference"
284 " relocation symbol.\n", machine->pid);
287 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
288 * have no _text sometimes.
290 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
293 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
296 pr_err("Couldn't record guest kernel [%d]'s reference"
297 " relocation symbol.\n", machine->pid);
/*
 * Marker event flushed after each pass over the mmap buffers; lets the
 * report side know all events before it are fully ordered.
 */
300 static struct perf_event_header finished_round_event = {
301 .size = sizeof(struct perf_event_header),
302 .type = PERF_RECORD_FINISHED_ROUND,
/*
 * Drain every mapped ring buffer once; when tracing data is present,
 * follow up with a FINISHED_ROUND marker so the consumer can sort.
 * NOTE(review): rc initialization/out label lines are elided in this
 * listing.
 */
305 static int perf_record__mmap_read_all(struct perf_record *rec)
310 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
311 if (rec->evlist->mmap[i].base) {
312 if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
319 if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
320 rc = perf_record__write(rec, &finished_round_event,
321 sizeof(finished_round_event));
/*
 * Start from "all header features enabled", then clear the ones that
 * don't apply to this run (build-ids when disabled, tracing data when
 * there are no tracepoints, branch stack when not requested).
 */
327 static void perf_record__init_features(struct perf_record *rec)
329 struct perf_evlist *evsel_list = rec->evlist;
330 struct perf_session *session = rec->session;
333 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
334 perf_header__set_feat(&session->header, feat);
337 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
339 if (!have_tracepoints(&evsel_list->entries))
340 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
342 if (!rec->opts.branch_stack)
343 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
/*
 * Main recording driver: set up signals and exit hooks, create the
 * session, optionally fork the workload, open counters, write/synthesize
 * the header-side events (attrs, tracing data, kernel/module mmaps,
 * existing threads), then loop draining the mmap buffers until done.
 * NOTE(review): this listing elides many lines (conditions, returns,
 * braces) — comments below describe only what is visible.
 */
346 static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
349 unsigned long waking = 0;
350 const bool forks = argc > 0;
351 struct machine *machine;
352 struct perf_tool *tool = &rec->tool;
353 struct perf_record_opts *opts = &rec->opts;
354 struct perf_evlist *evsel_list = rec->evlist;
355 struct perf_data_file *file = &rec->file;
356 struct perf_session *session;
357 bool disabled = false;
359 rec->progname = argv[0];
/* Ensure the forked workload is reaped/killed on any exit path. */
361 on_exit(perf_record__sig_exit, rec);
362 signal(SIGCHLD, sig_handler);
363 signal(SIGINT, sig_handler);
364 signal(SIGUSR1, sig_handler);
365 signal(SIGTERM, sig_handler);
367 session = perf_session__new(file, false, NULL);
368 if (session == NULL) {
369 pr_err("Not enough memory for reading perf file header\n");
373 rec->session = session;
375 perf_record__init_features(rec);
/* Fork (but don't yet exec) the workload so counters can inherit. */
378 err = perf_evlist__prepare_workload(evsel_list, &opts->target,
382 pr_err("Couldn't run the workload!\n");
383 goto out_delete_session;
387 if (perf_record__open(rec) != 0) {
389 goto out_delete_session;
392 if (!evsel_list->nr_groups)
393 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
396 * perf_session__delete(session) will be called at perf_record__exit()
398 on_exit(perf_record__exit, rec);
/* Pipe output gets a pipe header; file output a full header. */
401 err = perf_header__write_pipe(file->fd);
403 goto out_delete_session;
405 err = perf_session__write_header(session, evsel_list,
408 goto out_delete_session;
412 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
413 pr_err("Couldn't generate buildids. "
414 "Use --no-buildid to profile anyway.\n");
416 goto out_delete_session;
419 machine = &session->machines.host;
422 err = perf_event__synthesize_attrs(tool, session,
423 process_synthesized_event);
425 pr_err("Couldn't synthesize attrs.\n");
426 goto out_delete_session;
429 if (have_tracepoints(&evsel_list->entries)) {
431 * FIXME err <= 0 here actually means that
432 * there were no tracepoints so its not really
433 * an error, just that we don't need to
434 * synthesize anything. We really have to
435 * return this more properly and also
436 * propagate errors that now are calling die()
438 err = perf_event__synthesize_tracing_data(tool, file->fd, evsel_list,
439 process_synthesized_event);
441 pr_err("Couldn't record tracing data.\n");
442 goto out_delete_session;
/* Tracing data goes into the data section: account it. */
444 rec->bytes_written += err;
448 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
451 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
454 pr_err("Couldn't record kernel reference relocation symbol\n"
455 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
456 "Check /proc/kallsyms permission or run as root.\n");
458 err = perf_event__synthesize_modules(tool, process_synthesized_event,
461 pr_err("Couldn't record kernel module information.\n"
462 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
463 "Check /proc/modules permission or run as root.\n");
466 machines__process_guests(&session->machines,
467 perf_event__synthesize_guest_os, tool);
470 err = __machine__synthesize_threads(machine, tool, &opts->target, evsel_list->threads,
471 process_synthesized_event, opts->sample_address);
473 goto out_delete_session;
475 if (rec->realtime_prio) {
476 struct sched_param param;
478 param.sched_priority = rec->realtime_prio;
479 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
480 pr_err("Could not set realtime priority.\n");
482 goto out_delete_session;
487 * When perf is starting the traced process, all the events
488 * (apart from group members) have enable_on_exec=1 set,
489 * so don't spoil it by prematurely enabling them.
491 if (!target__none(&opts->target))
492 perf_evlist__enable(evsel_list);
498 perf_evlist__start_workload(evsel_list);
/* Main capture loop (loop construct elided in this listing). */
501 int hits = rec->samples;
503 if (perf_record__mmap_read_all(rec) < 0) {
505 goto out_delete_session;
/* Nothing new arrived: sleep in poll() until the fds wake us. */
508 if (hits == rec->samples) {
511 err = poll(evsel_list->pollfd, evsel_list->nr_fds, -1);
516 * When perf is starting the traced process, at the end events
517 * die with the process and we wait for that. Thus no need to
518 * disable events in this case.
520 if (done && !disabled && !target__none(&opts->target)) {
521 perf_evlist__disable(evsel_list);
526 if (quiet || signr == SIGUSR1)
529 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
532 * Approximate RIP event size: 24 bytes.
535 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
536 (double)rec->bytes_written / 1024.0 / 1024.0,
538 rec->bytes_written / 24);
543 perf_session__delete(session);
/* Helpers for building the branch-filter name -> sample-flag table. */
547 #define BRANCH_OPT(n, m) \
548 { .name = n, .mode = (m) }
550 #define BRANCH_END { .name = NULL }
/*
 * Names accepted by -j/--branch-filter, mapped to
 * PERF_SAMPLE_BRANCH_* bits (terminator line elided in this listing).
 */
557 static const struct branch_mode branch_modes[] = {
558 BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
559 BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
560 BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
561 BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
562 BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
563 BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
564 BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
565 BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
566 BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
567 BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
/*
 * Option callback for -j/--branch-filter: parse a comma-separated list
 * of branch_modes names (case-insensitive) into *opt->value, defaulting
 * to "any" when only privilege-level bits were given.
 * NOTE(review): loop structure, cleanup and returns are elided in this
 * listing.
 */
572 parse_branch_stack(const struct option *opt, const char *str, int unset)
/* Privilege-level-only mask: user|kernel|hv. */
575 (PERF_SAMPLE_BRANCH_USER |\
576 PERF_SAMPLE_BRANCH_KERNEL |\
577 PERF_SAMPLE_BRANCH_HV)
579 uint64_t *mode = (uint64_t *)opt->value;
580 const struct branch_mode *br;
581 char *s, *os = NULL, *p;
588 * cannot set it twice, -b + --branch-filter for instance
593 /* str may be NULL in case no arg is passed to -b */
595 /* because str is read-only */
596 s = os = strdup(str);
605 for (br = branch_modes; br->name; br++) {
606 if (!strcasecmp(s, br->name))
610 ui__warning("unknown branch filter %s,"
611 " check man page\n", s);
625 /* default to any branch */
626 if ((*mode & ~ONLY_PLM) == 0) {
627 *mode = PERF_SAMPLE_BRANCH_ANY;
634 #ifdef HAVE_LIBUNWIND_SUPPORT
/*
 * Parse a user-supplied stack dump size, round it up to u64 alignment
 * and reject zero or anything above USHRT_MAX rounded down to u64.
 * NOTE(review): endptr validation and return statements are elided in
 * this listing.
 */
635 static int get_stack_size(char *str, unsigned long *_size)
639 unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
641 size = strtoul(str, &endptr, 0);
647 size = round_up(size, sizeof(u64));
648 if (!size || size > max_size)
656 pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
660 #endif /* HAVE_LIBUNWIND_SUPPORT */
/*
 * Parse the --call-graph argument ("fp" or, with libunwind,
 * "dwarf[,dump_size]") into opts->call_graph / opts->stack_dump_size.
 * A writable copy of @arg is tokenized with strtok_r().
 * NOTE(review): several branches, cleanup and return lines are elided
 * in this listing.
 */
662 int record_parse_callchain(const char *arg, struct perf_record_opts *opts)
664 char *tok, *name, *saveptr = NULL;
668 /* We need buffer that we know we can write to. */
669 buf = malloc(strlen(arg) + 1);
675 tok = strtok_r((char *)buf, ",", &saveptr);
676 name = tok ? : (char *)buf;
679 /* Framepointer style */
680 if (!strncmp(name, "fp", sizeof("fp"))) {
/* "fp" accepts no extra argument. */
681 if (!strtok_r(NULL, ",", &saveptr)) {
682 opts->call_graph = CALLCHAIN_FP;
685 pr_err("callchain: No more arguments "
686 "needed for -g fp\n");
689 #ifdef HAVE_LIBUNWIND_SUPPORT
691 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
692 const unsigned long default_stack_dump_size = 8192;
695 opts->call_graph = CALLCHAIN_DWARF;
696 opts->stack_dump_size = default_stack_dump_size;
/* Optional second token overrides the default dump size. */
698 tok = strtok_r(NULL, ",", &saveptr);
700 unsigned long size = 0;
702 ret = get_stack_size(tok, &size);
703 opts->stack_dump_size = size;
705 #endif /* HAVE_LIBUNWIND_SUPPORT */
707 pr_err("callchain: Unknown --call-graph option "
/* Debug-print the chosen callchain mode (and dump size for DWARF). */
718 static void callchain_debug(struct perf_record_opts *opts)
720 pr_debug("callchain: type %d\n", opts->call_graph);
722 if (opts->call_graph == CALLCHAIN_DWARF)
723 pr_debug("callchain: stack dump size %d\n",
724 opts->stack_dump_size);
/*
 * Option callback for --call-graph: --no-call-graph disables callchains,
 * otherwise delegate parsing to record_parse_callchain() and log the
 * result.  NOTE(review): parameter list and returns are elided in this
 * listing.
 */
727 int record_parse_callchain_opt(const struct option *opt,
731 struct perf_record_opts *opts = opt->value;
734 /* --no-call-graph */
736 opts->call_graph = CALLCHAIN_NONE;
737 pr_debug("callchain: disabled\n");
741 ret = record_parse_callchain(arg, opts);
743 callchain_debug(opts);
/*
 * Option callback for bare -g (no argument): enable frame-pointer
 * callchains unless a mode was already selected, then log the result.
 */
748 int record_callchain_opt(const struct option *opt,
749 const char *arg __maybe_unused,
750 int unset __maybe_unused)
752 struct perf_record_opts *opts = opt->value;
754 if (opts->call_graph == CALLCHAIN_NONE)
755 opts->call_graph = CALLCHAIN_FP;
757 callchain_debug(opts);
/* Usage strings shown by parse_options()/usage_with_options(). */
761 static const char * const record_usage[] = {
762 "perf record [<options>] [<command>]",
763 "perf record [<options>] -- <command> [<options>]",
768 * XXX Ideally would be local to cmd_record() and passed to a perf_record__new
769 * because we need to have access to it in perf_record__exit, that is called
770 * after cmd_record() exits, but since record_options need to be accessible to
771 * builtin-script, leave it here.
773 * At least we don't ouch it in all the other functions here directly.
775 * Just say no to tons of global variables, sigh.
/*
 * Global perf_record instance with option defaults; UINT_MAX/ULLONG_MAX
 * mean "not set by the user".  NOTE(review): designator lines for the
 * nested opts struct are elided in this listing.
 */
777 static struct perf_record record = {
779 .mmap_pages = UINT_MAX,
780 .user_freq = UINT_MAX,
781 .user_interval = ULLONG_MAX,
785 .default_per_cpu = true,
/* Help text for -g/--call-graph; "dwarf" only with libunwind. */
790 #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
792 #ifdef HAVE_LIBUNWIND_SUPPORT
793 const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
795 const char record_callchain_help[] = CALLCHAIN_HELP "fp";
799 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
800 * with it and switch to use the library functions in perf_evlist that came
801 * from builtin-record.c, i.e. use perf_record_opts,
802 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
/*
 * Command-line option table for 'perf record' (also used by
 * builtin-script).  NOTE(review): some help-string continuation lines
 * and the OPT_END terminator are elided in this listing.
 */
805 const struct option record_options[] = {
806 OPT_CALLBACK('e', "event", &record.evlist, "event",
807 "event selector. use 'perf list' to list available events",
808 parse_events_option),
809 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
810 "event filter", parse_filter),
811 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
812 "record events on existing process id"),
813 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
814 "record events on existing thread id"),
815 OPT_INTEGER('r', "realtime", &record.realtime_prio,
816 "collect data with this RT SCHED_FIFO priority"),
817 OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
818 "collect data without buffering"),
819 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
820 "collect raw sample records from all opened counters"),
821 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
822 "system-wide collection from all CPUs"),
823 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
824 "list of cpus to monitor"),
825 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
826 OPT_STRING('o', "output", &record.file.path, "file",
828 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
829 &record.opts.no_inherit_set,
830 "child tasks do not inherit counters"),
831 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
832 OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
833 "number of mmap data pages",
834 perf_evlist__parse_mmap_pages),
835 OPT_BOOLEAN(0, "group", &record.opts.group,
836 "put the counters into a counter group"),
837 OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
838 NULL, "enables call-graph recording" ,
839 &record_callchain_opt),
840 OPT_CALLBACK(0, "call-graph", &record.opts,
841 "mode[,dump_size]", record_callchain_help,
842 &record_parse_callchain_opt),
843 OPT_INCR('v', "verbose", &verbose,
844 "be more verbose (show counter open errors, etc)"),
845 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
846 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
847 "per thread counts"),
848 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
850 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
851 OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
852 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
854 OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
855 "do not update the buildid cache"),
856 OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
857 "do not collect buildids in perf.data"),
858 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
859 "monitor event in cgroup name only",
861 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
864 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
865 "branch any", "sample any taken branches",
868 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
869 "branch filter mask", "branch stack filter modes",
871 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
872 "sample by weight (on special events only)"),
873 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
874 "sample transaction flags (special events only)"),
875 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
876 "use per-thread mmaps"),
/*
 * Entry point for 'perf record': parse options, validate the target,
 * create the evlist/maps, then hand off to __cmd_record() and clean up.
 * NOTE(review): this function is truncated at the end of this listing
 * (no return/closing visible) and several interior lines are elided —
 * comments describe only the visible code.
 */
880 int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
883 struct perf_evlist *evsel_list;
884 struct perf_record *rec = &record;
887 evsel_list = perf_evlist__new();
888 if (evsel_list == NULL)
891 rec->evlist = evsel_list;
893 argc = parse_options(argc, argv, record_options, record_usage,
894 PARSE_OPT_STOP_AT_NON_OPTION);
/* Need either a command to run or an explicit target. */
895 if (!argc && target__none(&rec->opts.target))
896 usage_with_options(record_usage, record_options);
898 if (nr_cgroups && !rec->opts.target.system_wide) {
899 ui__error("cgroup monitoring only available in"
900 " system-wide mode\n");
901 usage_with_options(record_usage, record_options);
906 if (symbol_conf.kptr_restrict)
908 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
909 "check /proc/sys/kernel/kptr_restrict.\n\n"
910 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
911 "file is not found in the buildid cache or in the vmlinux path.\n\n"
912 "Samples in kernel modules won't be resolved at all.\n\n"
913 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
914 "even with a suitable vmlinux or kallsyms file.\n\n"
916 if (rec->no_buildid_cache || rec->no_buildid)
917 disable_buildid_cache();
/* No -e given: fall back to the default event (cycles). */
919 if (evsel_list->nr_entries == 0 &&
920 perf_evlist__add_default(evsel_list) < 0) {
921 pr_err("Not enough memory for event selector list\n");
922 goto out_symbol_exit;
925 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
926 rec->opts.no_inherit = true;
928 err = target__validate(&rec->opts.target);
930 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
931 ui__warning("%s", errbuf);
934 err = target__parse_uid(&rec->opts.target);
936 int saved_errno = errno;
938 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
939 ui__error("%s", errbuf);
942 goto out_symbol_exit;
946 if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
947 usage_with_options(record_usage, record_options);
949 if (perf_record_opts__config(&rec->opts)) {
954 err = __cmd_record(&record, argc, argv);
956 perf_evlist__munmap(evsel_list);
957 perf_evlist__close(evsel_list);
959 perf_evlist__delete_maps(evsel_list);