1 #include <traceevent/event-parse.h>
3 #include "util/color.h"
4 #include "util/debug.h"
5 #include "util/evlist.h"
6 #include "util/machine.h"
7 #include "util/session.h"
8 #include "util/thread.h"
9 #include "util/parse-options.h"
10 #include "util/strlist.h"
11 #include "util/intlist.h"
12 #include "util/thread_map.h"
18 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
19 unsigned long arg, u8 *arg_mask __maybe_unused)
21 return scnprintf(bf, size, "%#lx", arg);
24 #define SCA_HEX syscall_arg__scnprintf_hex
/*
 * Decode an mmap()/mprotect() 'prot' argument into "READ|WRITE|..." form.
 * Known PROT_* bits are printed symbolically (and cleared via the macro);
 * any leftover bits presumably fall through to the final hex print below.
 */
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
					       unsigned long arg, u8 *arg_mask __maybe_unused)
	int printed = 0, prot = arg;

	/* PROT_NONE is 0, so it can't be handled by the bit-test macro. */
	if (prot == PROT_NONE)
		return scnprintf(bf, size, "NONE");
#define	P_MMAP_PROT(n) \
	if (prot & PROT_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \

	P_MMAP_PROT(GROWSDOWN);

	/* Anything not matched above is shown numerically. */
	printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);

#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
/*
 * Decode an mmap() 'flags' argument into "PRIVATE|ANONYMOUS|..." form,
 * mirroring the structure of the prot decoder above.  MAP_UNINITIALIZED
 * is conditionally compiled since not all libcs/kernels define it.
 */
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
						unsigned long arg, u8 *arg_mask __maybe_unused)
	int printed = 0, flags = arg;

#define	P_MMAP_FLAG(n) \
	if (flags & MAP_##n) { \
		printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \

	P_MMAP_FLAG(ANONYMOUS);
	P_MMAP_FLAG(DENYWRITE);
	P_MMAP_FLAG(EXECUTABLE);
	P_MMAP_FLAG(GROWSDOWN);
	P_MMAP_FLAG(NONBLOCK);
	P_MMAP_FLAG(NORESERVE);
	P_MMAP_FLAG(POPULATE);
#ifdef MAP_UNINITIALIZED
	P_MMAP_FLAG(UNINITIALIZED);

	/* Unrecognized flag bits are shown numerically. */
	printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);

#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
/*
 * Decode a madvise() 'behavior' argument.  Unlike prot/flags this is an
 * enum, not a bitmask, so each known MADV_* value returns immediately
 * from a switch case; several values are #ifdef-guarded because they
 * only exist on newer kernels/headers.
 */
static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
						      unsigned long arg, u8 *arg_mask __maybe_unused)
#define	P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)

	P_MADV_BHV(SEQUENTIAL);
	P_MADV_BHV(WILLNEED);
	P_MADV_BHV(DONTNEED);
	P_MADV_BHV(DONTFORK);
	P_MADV_BHV(HWPOISON);
#ifdef MADV_SOFT_OFFLINE
	P_MADV_BHV(SOFT_OFFLINE);
	P_MADV_BHV(MERGEABLE);
	P_MADV_BHV(UNMERGEABLE);
	P_MADV_BHV(HUGEPAGE);
#ifdef MADV_NOHUGEPAGE
	P_MADV_BHV(NOHUGEPAGE);
	P_MADV_BHV(DONTDUMP);

	/* Unknown behavior values fall through to a plain hex print. */
	return scnprintf(bf, size, "%#x", behavior);

#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
140 static struct syscall_fmt {
143 size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 *arg_mask);
148 { .name = "access", .errmsg = true, },
149 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
150 { .name = "brk", .hexret = true,
151 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
152 { .name = "mmap", .hexret = true, },
153 { .name = "connect", .errmsg = true, },
154 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
155 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
156 { .name = "futex", .errmsg = true, },
157 { .name = "ioctl", .errmsg = true,
158 .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
159 { .name = "lstat", .errmsg = true, .alias = "newlstat", },
160 { .name = "madvise", .errmsg = true,
161 .arg_scnprintf = { [0] = SCA_HEX, /* start */
162 [2] = SCA_MADV_BHV, /* behavior */ }, },
163 { .name = "mmap", .hexret = true,
164 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
165 [2] = SCA_MMAP_PROT, /* prot */
166 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
167 { .name = "mprotect", .errmsg = true,
168 .arg_scnprintf = { [0] = SCA_HEX, /* start */
169 [2] = SCA_MMAP_PROT, /* prot */ }, },
170 { .name = "mremap", .hexret = true,
171 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
172 [4] = SCA_HEX, /* new_addr */ }, },
173 { .name = "munmap", .errmsg = true,
174 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
175 { .name = "open", .errmsg = true, },
176 { .name = "poll", .errmsg = true, .timeout = true, },
177 { .name = "ppoll", .errmsg = true, .timeout = true, },
178 { .name = "pread", .errmsg = true, .alias = "pread64", },
179 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
180 { .name = "read", .errmsg = true, },
181 { .name = "recvfrom", .errmsg = true, },
182 { .name = "select", .errmsg = true, .timeout = true, },
183 { .name = "socket", .errmsg = true, },
184 { .name = "stat", .errmsg = true, .alias = "newstat", },
185 { .name = "uname", .errmsg = true, .alias = "newuname", },
188 static int syscall_fmt__cmp(const void *name, const void *fmtp)
190 const struct syscall_fmt *fmt = fmtp;
191 return strcmp(name, fmt->name);
194 static struct syscall_fmt *syscall_fmt__find(const char *name)
196 const int nmemb = ARRAY_SIZE(syscall_fmts);
197 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
	struct event_format *tp_format;	/* sys_enter_<name> tracepoint format */
	/* formatting hints from syscall_fmts[], NULL if none */
	struct syscall_fmt  *fmt;
	/* per-argument printer table, built by syscall__set_arg_fmts() */
	size_t		    (**arg_scnprintf)(char *bf, size_t size,
					      unsigned long arg, u8 *args_mask);
/*
 * Print a syscall duration as "(  N.NNN ms): ", color-coded by how slow
 * it was: red for the slowest bucket, yellow for >= 0.01 ms, normal
 * otherwise.  Returns the number of characters written.
 */
static size_t fprintf_duration(unsigned long t, FILE *fp)
	double duration = (double)t / NSEC_PER_MSEC;
	size_t printed = fprintf(fp, "(");

		printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
	else if (duration >= 0.01)
		printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
		printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
	return printed + fprintf(fp, "): ");
/* Per-thread accounting, hung off thread->priv by thread__trace(). */
struct thread_trace {
	unsigned long	  nr_events;	/* samples seen on this thread */
232 static struct thread_trace *thread_trace__new(void)
234 return zalloc(sizeof(struct thread_trace));
/*
 * Return the thread's thread_trace, lazily allocating it on first use.
 * On allocation failure a red warning is printed to fp and, presumably,
 * NULL is returned so the caller drops the sample — confirm against the
 * elided error path.
 */
static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
	struct thread_trace *ttrace;

	if (thread->priv == NULL)
		thread->priv = thread_trace__new();

	/* still NULL => thread_trace__new() failed */
	if (thread->priv == NULL)

	ttrace = thread->priv;

	color_fprintf(fp, PERF_COLOR_RED,
	      "WARNING: not enough memory, dropping samples!\n");
	struct perf_tool	tool;		/* callbacks for session processing */
	struct syscall		*table;		/* id-indexed syscall info cache */
	struct perf_record_opts	opts;		/* recording/target options */
	unsigned long		nr_events;	/* total samples processed */
	struct strlist		*ev_qualifier;	/* -e name filter (or NULL) */
	bool			not_ev_qualifier; /* -e '!...' inverts the filter */
	struct intlist		*tid_list;	/* replay-mode tid filter */
	struct intlist		*pid_list;	/* replay-mode pid filter */
	bool			multiple_threads; /* prefix output lines with tid */
	double			duration_filter; /* --duration threshold, in ms */
282 static bool trace__filter_duration(struct trace *trace, double t)
284 return t < (trace->duration_filter * NSEC_PER_MSEC);
287 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
289 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
291 return fprintf(fp, "%10.3f ", ts);
/* Set by sig_handler(); the main event loop exits when it sees this. */
static bool done = false;

/* SIGINT/SIGCHLD handler: only flags the main loop to stop. */
static void sig_handler(int sig __maybe_unused)
/*
 * Print the common line prefix for a syscall: timestamp, duration and,
 * when more than one thread is being traced, the tid.
 */
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
					u64 duration, u64 tstamp, FILE *fp)
	size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
	printed += fprintf_duration(duration, fp);

	if (trace->multiple_threads)
		printed += fprintf(fp, "%d ", thread->tid);
/*
 * Feed a non-sample event to the machine state machinery.  LOST events
 * get an extra red warning so the user knows data was dropped; other
 * event types take the generic machine__process_event() path.
 */
static int trace__process_event(struct trace *trace, struct machine *machine,
				union perf_event *event)
	switch (event->header.type) {
	case PERF_RECORD_LOST:
		color_fprintf(trace->output, PERF_COLOR_RED,
			      "LOST %" PRIu64 " events!\n", event->lost.lost);
		ret = machine__process_lost_event(machine, event);
		ret = machine__process_event(machine, event);
331 static int trace__tool_process(struct perf_tool *tool,
332 union perf_event *event,
333 struct perf_sample *sample __maybe_unused,
334 struct machine *machine)
336 struct trace *trace = container_of(tool, struct trace, tool);
337 return trace__process_event(trace, machine, event);
/*
 * Initialize the symbol machinery and the host machine representation,
 * then synthesize existing threads: just the targeted ones when tracing
 * specific tasks, otherwise all threads on the system.
 */
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
	int err = symbol__init();

	machine__init(&trace->host, "", HOST_KERNEL_ID);
	machine__create_kernel_maps(&trace->host);

	if (perf_target__has_task(&trace->opts.target)) {
		err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
		err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
/*
 * Build the per-argument printer table for one syscall.  The first
 * tracepoint field is skipped (hence nr_fields - 1 and fields->next):
 * presumably it is the syscall id, not a user argument — confirm against
 * the sys_enter tracepoint format.  Explicit syscall_fmts hints win;
 * otherwise pointer-typed fields default to hex.
 */
static int syscall__set_arg_fmts(struct syscall *sc)
	struct format_field *field;

	sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
	if (sc->arg_scnprintf == NULL)

	for (field = sc->tp_format->format.fields->next; field; field = field->next) {
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
/*
 * Lazily populate trace->syscalls.table[id]: map the id to a name via
 * audit, grow the table if needed (zeroing the newly exposed slots so
 * unseen ids have NULL names), apply the -e qualifier, and resolve the
 * sys_enter_<name> tracepoint format (falling back to the alias).
 */
static int trace__read_syscall_info(struct trace *trace, int id)
	const char *name = audit_syscall_to_name(id, trace->audit_machine);

	if (id > trace->syscalls.max) {
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)

		if (trace->syscalls.max != -1) {
			/* zero only the slots beyond the old maximum */
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
			/* first allocation: zero everything */
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max   = id;

	sc = trace->syscalls.table + id;

	if (trace->ev_qualifier) {
		bool in = strlist__find(trace->ev_qualifier, name) != NULL;

		/* filtered out (or in, when the qualifier is negated) */
		if (!(in ^ trace->not_ev_qualifier)) {
			 * No need to do read tracepoint information since this will be

	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = event_format__new("syscalls", tp_name);

	/* some syscalls' tracepoints use the alias name, e.g. newstat */
	if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = event_format__new("syscalls", tp_name);

	if (sc->tp_format == NULL)

	return syscall__set_arg_fmts(sc);
/*
 * Format the six raw syscall arguments into bf.  With tracepoint format
 * info each argument is printed as "name: value", using the per-arg
 * printer when one was set up; without it (the fallback branch at the
 * bottom) arguments print positionally as "arg<i>: value".
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
	if (sc->tp_format != NULL) {
		struct format_field *field;
		u8 mask = 0, bit = 1;

		for (field = sc->tp_format->format.fields->next; field;
		     field = field->next, ++i, bit <<= 1) {

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);

			if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
				printed += sc->arg_scnprintf[i](bf + printed, size - printed,
				printed += scnprintf(bf + printed, size - printed,

		/* no tracepoint format: dump args positionally */
		printed += scnprintf(bf + printed, size - printed,
				     printed ? ", " : "", i, args[i]);
481 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
482 struct perf_sample *sample);
/*
 * Resolve a sample's "id" field to the cached struct syscall, reading
 * the syscall info on first sight of an id.  Returns NULL (after an
 * explanatory message) for invalid ids or lookup failures.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel,
					   struct perf_sample *sample)
	int id = perf_evsel__intval(evsel, sample, "id");

	 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
	 * before that, leaving at a higher verbosity level till that is
	 * explained. Reproduced with plain ftrace with:
	 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
	 * grep "NR -1 " /t/trace_pipe
	 * After generating some load on the machine.
		fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
			id, perf_evsel__name(evsel), ++n);

	/* first time this id is seen: read its tracepoint info */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))

	return &trace->syscalls.table[id];

	fprintf(trace->output, "Problems reading syscall %d", id);
	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
		fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
	fputs(" information\n", trace->output);
/*
 * raw_syscalls:sys_enter handler.  Formats "name(args" into the
 * thread's entry_str buffer.  For exit/exit_group — which never return,
 * so no sys_exit will pair with them — the line is printed immediately;
 * everything else is left pending for trace__sys_exit() to complete.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    struct perf_sample *sample)
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	thread = machine__findnew_thread(&trace->host, sample->pid,
	ttrace = thread__trace(thread, trace->output);

	args = perf_evsel__rawptr(evsel, sample, "args");
		fprintf(trace->output, "Problems reading syscall arguments\n");

	ttrace = thread->priv;

	/* lazily allocate the per-thread line buffer (fixed 1024 bytes) */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(1024);
		if (!ttrace->entry_str)

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);

	if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
		if (!trace->duration_filter) {
			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
		ttrace->entry_pending = true;
/*
 * raw_syscalls:sys_exit handler.  Completes the line started at enter
 * (or prints "... [continued]" when the enter side was not pending,
 * e.g. the syscall was in flight when tracing started), applies the
 * --duration filter, and decodes the return value according to the
 * syscall's fmt flags (errno name, Timeout, hex, or plain decimal).
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   struct perf_sample *sample)
	struct thread *thread;
	struct syscall *sc = trace__syscall_info(trace, evsel, sample);
	struct thread_trace *ttrace;

	thread = machine__findnew_thread(&trace->host, sample->pid,
	ttrace = thread__trace(thread, trace->output);

	ret = perf_evsel__intval(evsel, sample, "ret");

	ttrace = thread->priv;

	ttrace->exit_time = sample->time;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
	} else if (trace->duration_filter)

	trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
		/* no matching enter was seen: mark the line as continued */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);

	if (sc->fmt == NULL) {
		fprintf(trace->output, ") = %d", ret);
	} else if (ret < 0 && sc->fmt->errmsg) {
		const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#x", ret);

	fputc('\n', trace->output);

	ttrace->entry_pending = false;
/*
 * sched:sched_stat_runtime handler (--sched mode): accumulate runtime
 * per thread and globally, for the end-of-run summary.  The fprintf at
 * the bottom presumably sits on an error path for threads without a
 * thread_trace — confirm against the elided control flow.
 */
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
				     struct perf_sample *sample)
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(&trace->host,
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms  += runtime_ms;

	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		perf_evsel__strval(evsel, sample, "comm"),
		(pid_t)perf_evsel__intval(evsel, sample, "pid"),
		perf_evsel__intval(evsel, sample, "vruntime"));
/*
 * Replay-mode filter: keep samples whose pid/tid is in the requested
 * lists; when any list is set, everything else is skipped.
 */
static bool skip_sample(struct trace *trace, struct perf_sample *sample)
	if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
	    (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))

	/* a filter exists but this sample matched neither list */
	if (trace->pid_list || trace->tid_list)
/*
 * Session (replay) sample callback: apply the pid/tid filter, latch the
 * base timestamp from the first sample, then dispatch to the handler
 * registered for this tracepoint evsel.
 */
static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event __maybe_unused,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine __maybe_unused)
	struct trace *trace = container_of(tool, struct trace, tool);
	tracepoint_handler handler = evsel->handler.func;

	if (skip_sample(trace, sample))

	/* first sample seen anchors all relative timestamps */
	if (trace->base_time == 0)
		trace->base_time = sample->time;

	handler(trace, evsel, sample);
/* True when the session's evlist contains the named tracepoint. */
perf_session__has_tp(struct perf_session *session, const char *name)
	struct perf_evsel *evsel;

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);

	return evsel != NULL;
/*
 * Parse the -p/-t comma-separated id strings into intlists used by
 * skip_sample() when replaying a perf.data file.
 */
static int parse_target_str(struct trace *trace)
	if (trace->opts.target.pid) {
		trace->pid_list = intlist__new(trace->opts.target.pid);
		if (trace->pid_list == NULL) {
			pr_err("Error parsing process id string\n");

	if (trace->opts.target.tid) {
		trace->tid_list = intlist__new(trace->opts.target.tid);
		if (trace->tid_list == NULL) {
			pr_err("Error parsing thread id string\n");
/*
 * Live-tracing main path: build an evlist with the raw_syscalls (and,
 * with --sched, sched_stat_runtime) tracepoints, resolve the target,
 * open/mmap the events, optionally fork the workload, then loop reading
 * mmap'd events and dispatching samples to their handlers until done.
 * Cleanup is staged through the goto labels (elided here) in reverse
 * order of acquisition.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel;
	unsigned long before;
	const bool forks = argc > 0;	/* a command was given to run */

	if (evlist == NULL) {
		fprintf(trace->output, "Not enough memory to run!\n");

	if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
	    perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
		fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
		goto out_delete_evlist;

	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime)) {
		fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
		goto out_delete_evlist;

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;

	err = trace__symbols_init(trace, evlist);
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_maps;

	perf_evlist__config(evlist, &trace->opts);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_maps;

	err = perf_evlist__open(evlist);
		fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
		goto out_delete_maps;

	err = perf_evlist__mmap(evlist, UINT_MAX, false);
		fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
		goto out_close_evlist;

	perf_evlist__enable(evlist);

		perf_evlist__start_workload(evlist);

	/* map[0] == -1 means "all threads" (system wide / forked workload) */
	trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;

	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			const u32 type = event->header.type;
			tracepoint_handler handler;
			struct perf_sample sample;

			err = perf_evlist__parse_sample(evlist, event, &sample);
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);

			if (trace->base_time == 0)
				trace->base_time = sample.time;

			/* side-band events (MMAP, COMM, LOST, ...) */
			if (type != PERF_RECORD_SAMPLE) {
				trace__process_event(trace, &trace->host, event);

			evsel = perf_evlist__id2evsel(evlist, sample.id);
				fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);

			if (sample.raw_data == NULL) {
				fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
				       perf_evsel__name(evsel), sample.tid,
				       sample.cpu, sample.raw_size);

			handler = evsel->handler.func;
			handler(trace, evsel, &sample);

	/* no new events this pass: either stop (done) or block in poll() */
	if (trace->nr_events == before) {
		goto out_unmap_evlist;

		poll(evlist->pollfd, evlist->nr_fds, -1);

	perf_evlist__disable(evlist);

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__delete_maps(evlist);
	perf_evlist__delete(evlist);
/*
 * Replay path (-i): open a perf.data session, wire the sample/mmap/comm
 * etc. tool callbacks, verify the file actually contains both
 * raw_syscalls tracepoints, then process all recorded events in
 * timestamp order.
 */
static int trace__replay(struct trace *trace)
	const struct perf_evsel_str_handler handlers[] = {
		{ "raw_syscalls:sys_enter",  trace__sys_enter, },
		{ "raw_syscalls:sys_exit",   trace__sys_exit, },
	struct perf_session *session;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;

	trace->tool.ordered_samples = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	if (symbol__init() < 0)

	session = perf_session__new(input_name, O_RDONLY, 0, false,

	err = perf_session__set_tracepoints_handlers(session, handlers);

	if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
		pr_err("Data file does not have raw_syscalls:sys_enter events\n");

	if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
		pr_err("Data file does not have raw_syscalls:sys_exit events\n");

	err = parse_target_str(trace);

	err = perf_session__process_events(session, &trace->tool);
		pr_err("Failed to process events, error %d", err);

	perf_session__delete(session);
/* Print the banner above the per-thread summary table (--sched mode). */
static size_t trace__fprintf_threads_header(FILE *fp)
	printed  = fprintf(fp, "\n _____________________________________________________________________\n");
	printed += fprintf(fp," __) Summary of events (__\n\n");
	printed += fprintf(fp,"              [ task - pid ]     [ events ] [ ratio ]  [ runtime ]\n");
	printed += fprintf(fp," _____________________________________________________________________\n\n");
/*
 * Print one summary line per traced thread: comm, tid, event count,
 * share of total events (color-coded by magnitude) and accumulated
 * runtime in ms.
 */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
	size_t printed = trace__fprintf_threads_header(fp);

	for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
		struct thread *thread = rb_entry(nd, struct thread, rb_node);
		struct thread_trace *ttrace = thread->priv;

		ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;

		/* color by share of total: red > 50%(?), green > 25%, yellow > 5% */
		color = PERF_COLOR_NORMAL;
			color = PERF_COLOR_RED;
		else if (ratio > 25.0)
			color = PERF_COLOR_GREEN;
		else if (ratio > 5.0)
			color = PERF_COLOR_YELLOW;

		printed += color_fprintf(fp, color, "%20s", thread->comm);
		printed += fprintf(fp, " - %-5d :%11lu   [", thread->tid, ttrace->nr_events);
		printed += color_fprintf(fp, color, "%5.1f%%", ratio);
		printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
989 static int trace__set_duration(const struct option *opt, const char *str,
990 int unset __maybe_unused)
992 struct trace *trace = opt->value;
994 trace->duration_filter = atof(str);
/*
 * Open the -o output file for writing.  An existing non-empty file is
 * first renamed to "<name>.old" so a previous run's output survives.
 * Returns 0 or -errno from the failed fopen().
 */
static int trace__open_output(struct trace *trace, const char *filename)
	if (!stat(filename, &st) && st.st_size) {
		char oldname[PATH_MAX];

		scnprintf(oldname, sizeof(oldname), "%s.old", filename);
		rename(filename, oldname);

	trace->output = fopen(filename, "w");

	return trace->output == NULL ? -errno : 0;
/*
 * Entry point for 'perf trace': parse options, set up output and the
 * -e event qualifier, validate/parse the target, then either replay a
 * perf.data file (-i) or trace live; finally print the --sched summary
 * and close the output file.
 */
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
	const char * const trace_usage[] = {
		"perf trace [<options>] [<command>]",
		"perf trace [<options>] -- <command> [<options>]",

	struct trace trace = {
		.audit_machine = audit_detect_machine(),
			.user_freq     = UINT_MAX,
			.user_interval = ULLONG_MAX,

	const char *output_name = NULL;
	const char *ev_qualifier_str = NULL;
	const struct option trace_options[] = {
	OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
		    "list of events to trace"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
	OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
		    "trace events on existing process id"),
	OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
		    "trace events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
		     "number of mmap data pages"),
	OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
	OPT_CALLBACK(0, "duration", &trace, "float",
		     "show only events with duration > N.M ms",
		     trace__set_duration),
	OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
	OPT_INCR('v', "verbose", &verbose, "be more verbose"),

	argc = parse_options(argc, argv, trace_options, trace_usage, 0);

	if (output_name != NULL) {
		err = trace__open_output(&trace, output_name);
			perror("failed to create output file");

	if (ev_qualifier_str != NULL) {
		const char *s = ev_qualifier_str;

		/* a leading '!' inverts the qualifier: trace everything BUT these */
		trace.not_ev_qualifier = *s == '!';
		if (trace.not_ev_qualifier)
		trace.ev_qualifier = strlist__new(true, s);
		if (trace.ev_qualifier == NULL) {
			fputs("Not enough memory to parse event qualifier",

	err = perf_target__validate(&trace.opts.target);
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);

	err = perf_target__parse_uid(&trace.opts.target);
		perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
		fprintf(trace.output, "%s", bf);

	/* no command and no target given: default to system-wide tracing */
	if (!argc && perf_target__none(&trace.opts.target))
		trace.opts.target.system_wide = true;

		err = trace__replay(&trace);
		err = trace__run(&trace, argc, argv);

	if (trace.sched && !err)
		trace__fprintf_thread_summary(&trace, trace.output);

	if (output_name != NULL)
		fclose(trace.output);