4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
16 * Released under the GPL v2. (and only v2, not any later version)
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
22 #include "util/color.h"
23 #include "util/debug.h"
24 #include "util/evlist.h"
25 #include <subcmd/exec-cmd.h>
26 #include "util/machine.h"
27 #include "util/session.h"
28 #include "util/thread.h"
29 #include <subcmd/parse-options.h>
30 #include "util/strlist.h"
31 #include "util/intlist.h"
32 #include "util/thread_map.h"
33 #include "util/stat.h"
34 #include "trace-event.h"
35 #include "util/parse-events.h"
36 #include "util/bpf-loader.h"
37 #include "callchain.h"
38 #include "syscalltbl.h"
39 #include "rb_resort.h"
41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
44 #include <linux/err.h>
45 #include <linux/filter.h>
46 #include <linux/audit.h>
47 #include <linux/random.h>
48 #include <linux/stringify.h>
49 #include <linux/time64.h>
52 # define O_CLOEXEC 02000000
56 struct perf_tool tool;
57 struct syscalltbl *sctbl;
60 struct syscall *table;
62 struct perf_evsel *sys_enter,
66 struct record_opts opts;
67 struct perf_evlist *evlist;
69 struct thread *current;
72 unsigned long nr_events;
73 struct strlist *ev_qualifier;
82 double duration_filter;
88 unsigned int max_stack;
89 unsigned int min_stack;
90 bool not_ev_qualifier;
94 bool multiple_threads;
100 bool kernel_syscallchains;
110 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
111 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
115 #define TP_UINT_FIELD(bits) \
116 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
119 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
128 #define TP_UINT_FIELD__SWAPPED(bits) \
129 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
132 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
133 return bswap_##bits(value);\
136 TP_UINT_FIELD__SWAPPED(16);
137 TP_UINT_FIELD__SWAPPED(32);
138 TP_UINT_FIELD__SWAPPED(64);
140 static int tp_field__init_uint(struct tp_field *field,
141 struct format_field *format_field,
144 field->offset = format_field->offset;
146 switch (format_field->size) {
148 field->integer = tp_field__u8;
151 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
154 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
157 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
166 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
168 return sample->raw_data + field->offset;
171 static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
173 field->offset = format_field->offset;
174 field->pointer = tp_field__ptr;
181 struct tp_field args, ret;
185 static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
186 struct tp_field *field,
189 struct format_field *format_field = perf_evsel__field(evsel, name);
191 if (format_field == NULL)
194 return tp_field__init_uint(field, format_field, evsel->needs_swap);
197 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
198 ({ struct syscall_tp *sc = evsel->priv;\
199 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
201 static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
202 struct tp_field *field,
205 struct format_field *format_field = perf_evsel__field(evsel, name);
207 if (format_field == NULL)
210 return tp_field__init_ptr(field, format_field);
213 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
214 ({ struct syscall_tp *sc = evsel->priv;\
215 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
217 static void perf_evsel__delete_priv(struct perf_evsel *evsel)
220 perf_evsel__delete(evsel);
223 static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
225 evsel->priv = malloc(sizeof(struct syscall_tp));
226 if (evsel->priv != NULL) {
227 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
230 evsel->handler = handler;
241 static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
243 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
245 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
247 evsel = perf_evsel__newtp("syscalls", direction);
252 if (perf_evsel__init_syscall_tp(evsel, handler))
258 perf_evsel__delete_priv(evsel);
/*
 * Fetch a named syscall tracepoint field from @sample via the accessors
 * installed in the evsel's syscall_tp private area.
 */
#define perf_evsel__sc_tp_uint(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.integer(&fields->name, sample); })

#define perf_evsel__sc_tp_ptr(evsel, name, sample) \
	({ struct syscall_tp *fields = evsel->priv; \
	   fields->name.pointer(&fields->name, sample); })
272 struct thread *thread;
282 const char **entries;
285 #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
286 .nr_entries = ARRAY_SIZE(array), \
290 #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
292 .nr_entries = ARRAY_SIZE(array), \
296 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
298 struct syscall_arg *arg)
300 struct strarray *sa = arg->parm;
301 int idx = arg->val - sa->offset;
303 if (idx < 0 || idx >= sa->nr_entries)
304 return scnprintf(bf, size, intfmt, arg->val);
306 return scnprintf(bf, size, "%s", sa->entries[idx]);
/* Decimal fallback variant of the strarray beautifier. */
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
					      struct syscall_arg *arg)
{
	return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
}

#define SCA_STRARRAY syscall_arg__scnprintf_strarray
317 #if defined(__i386__) || defined(__x86_64__)
319 * FIXME: Make this available to all arches as soon as the ioctl beautifier
320 * gets rewritten to support all arches.
322 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
323 struct syscall_arg *arg)
325 return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
328 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
329 #endif /* defined(__i386__) || defined(__x86_64__) */
331 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
332 struct syscall_arg *arg);
334 #define SCA_FD syscall_arg__scnprintf_fd
337 #define AT_FDCWD -100
340 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
341 struct syscall_arg *arg)
346 return scnprintf(bf, size, "CWD");
348 return syscall_arg__scnprintf_fd(bf, size, arg);
351 #define SCA_FDAT syscall_arg__scnprintf_fd_at
353 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
354 struct syscall_arg *arg);
356 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
358 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
359 struct syscall_arg *arg)
361 return scnprintf(bf, size, "%#lx", arg->val);
364 #define SCA_HEX syscall_arg__scnprintf_hex
366 static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
367 struct syscall_arg *arg)
369 return scnprintf(bf, size, "%d", arg->val);
372 #define SCA_INT syscall_arg__scnprintf_int
374 static const char *bpf_cmd[] = {
375 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
376 "MAP_GET_NEXT_KEY", "PROG_LOAD",
378 static DEFINE_STRARRAY(bpf_cmd);
380 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
381 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
383 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
384 static DEFINE_STRARRAY(itimers);
386 static const char *keyctl_options[] = {
387 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
388 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
389 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
390 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
391 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
393 static DEFINE_STRARRAY(keyctl_options);
395 static const char *whences[] = { "SET", "CUR", "END",
403 static DEFINE_STRARRAY(whences);
405 static const char *fcntl_cmds[] = {
406 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
407 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
408 "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
411 static DEFINE_STRARRAY(fcntl_cmds);
413 static const char *rlimit_resources[] = {
414 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
415 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
418 static DEFINE_STRARRAY(rlimit_resources);
420 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
421 static DEFINE_STRARRAY(sighow);
423 static const char *clockid[] = {
424 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
425 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
426 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
428 static DEFINE_STRARRAY(clockid);
430 static const char *socket_families[] = {
431 "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
432 "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
433 "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
434 "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
435 "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
436 "ALG", "NFC", "VSOCK",
438 static DEFINE_STRARRAY(socket_families);
440 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
441 struct syscall_arg *arg)
446 if (mode == F_OK) /* 0 */
447 return scnprintf(bf, size, "F");
449 if (mode & n##_OK) { \
450 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
460 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
465 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
467 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
468 struct syscall_arg *arg);
470 #define SCA_FILENAME syscall_arg__scnprintf_filename
472 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
473 struct syscall_arg *arg)
475 int printed = 0, flags = arg->val;
478 if (flags & O_##n) { \
479 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
488 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
493 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
495 #if defined(__i386__) || defined(__x86_64__)
497 * FIXME: Make this available to all arches.
499 #define TCGETS 0x5401
501 static const char *tioctls[] = {
502 "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
503 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
504 "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
505 "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
506 "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
507 "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
508 "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
509 "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
510 "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
511 "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
512 "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
513 [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
514 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
515 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
516 "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
519 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
520 #endif /* defined(__i386__) || defined(__x86_64__) */
522 #ifndef GRND_NONBLOCK
523 #define GRND_NONBLOCK 0x0001
526 #define GRND_RANDOM 0x0002
529 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
530 struct syscall_arg *arg)
532 int printed = 0, flags = arg->val;
535 if (flags & GRND_##n) { \
536 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
537 flags &= ~GRND_##n; \
545 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
550 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
552 #define STRARRAY(arg, name, array) \
553 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
554 .arg_parm = { [arg] = &strarray__##array, }
556 #include "trace/beauty/eventfd.c"
557 #include "trace/beauty/flock.c"
558 #include "trace/beauty/futex_op.c"
559 #include "trace/beauty/mmap.c"
560 #include "trace/beauty/mode_t.c"
561 #include "trace/beauty/msg_flags.c"
562 #include "trace/beauty/open_flags.c"
563 #include "trace/beauty/perf_event_open.c"
564 #include "trace/beauty/pid.c"
565 #include "trace/beauty/sched_policy.c"
566 #include "trace/beauty/seccomp.c"
567 #include "trace/beauty/signum.c"
568 #include "trace/beauty/socket_type.c"
569 #include "trace/beauty/waitid_options.c"
571 static struct syscall_fmt {
574 size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
581 { .name = "access", .errmsg = true,
582 .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
583 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
584 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
585 { .name = "brk", .hexret = true,
586 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
587 { .name = "chdir", .errmsg = true, },
588 { .name = "chmod", .errmsg = true, },
589 { .name = "chroot", .errmsg = true, },
590 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
591 { .name = "clone", .errpid = true, },
592 { .name = "close", .errmsg = true,
593 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
594 { .name = "connect", .errmsg = true, },
595 { .name = "creat", .errmsg = true, },
596 { .name = "dup", .errmsg = true, },
597 { .name = "dup2", .errmsg = true, },
598 { .name = "dup3", .errmsg = true, },
599 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
600 { .name = "eventfd2", .errmsg = true,
601 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
602 { .name = "faccessat", .errmsg = true, },
603 { .name = "fadvise64", .errmsg = true, },
604 { .name = "fallocate", .errmsg = true, },
605 { .name = "fchdir", .errmsg = true, },
606 { .name = "fchmod", .errmsg = true, },
607 { .name = "fchmodat", .errmsg = true,
608 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
609 { .name = "fchown", .errmsg = true, },
610 { .name = "fchownat", .errmsg = true,
611 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
612 { .name = "fcntl", .errmsg = true,
613 .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
614 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
615 { .name = "fdatasync", .errmsg = true, },
616 { .name = "flock", .errmsg = true,
617 .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
618 { .name = "fsetxattr", .errmsg = true, },
619 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
620 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
621 { .name = "fstatfs", .errmsg = true, },
622 { .name = "fsync", .errmsg = true, },
623 { .name = "ftruncate", .errmsg = true, },
624 { .name = "futex", .errmsg = true,
625 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
626 { .name = "futimesat", .errmsg = true,
627 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
628 { .name = "getdents", .errmsg = true, },
629 { .name = "getdents64", .errmsg = true, },
630 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
631 { .name = "getpid", .errpid = true, },
632 { .name = "getpgid", .errpid = true, },
633 { .name = "getppid", .errpid = true, },
634 { .name = "getrandom", .errmsg = true,
635 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
636 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
637 { .name = "getxattr", .errmsg = true, },
638 { .name = "inotify_add_watch", .errmsg = true, },
639 { .name = "ioctl", .errmsg = true,
641 #if defined(__i386__) || defined(__x86_64__)
643 * FIXME: Make this available to all arches.
645 [1] = SCA_STRHEXARRAY, /* cmd */
646 [2] = SCA_HEX, /* arg */ },
647 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
649 [2] = SCA_HEX, /* arg */ }, },
651 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
652 { .name = "kill", .errmsg = true,
653 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
654 { .name = "lchown", .errmsg = true, },
655 { .name = "lgetxattr", .errmsg = true, },
656 { .name = "linkat", .errmsg = true,
657 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
658 { .name = "listxattr", .errmsg = true, },
659 { .name = "llistxattr", .errmsg = true, },
660 { .name = "lremovexattr", .errmsg = true, },
661 { .name = "lseek", .errmsg = true,
662 .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
663 .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
664 { .name = "lsetxattr", .errmsg = true, },
665 { .name = "lstat", .errmsg = true, .alias = "newlstat", },
666 { .name = "lsxattr", .errmsg = true, },
667 { .name = "madvise", .errmsg = true,
668 .arg_scnprintf = { [0] = SCA_HEX, /* start */
669 [2] = SCA_MADV_BHV, /* behavior */ }, },
670 { .name = "mkdir", .errmsg = true, },
671 { .name = "mkdirat", .errmsg = true,
672 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
673 { .name = "mknod", .errmsg = true, },
674 { .name = "mknodat", .errmsg = true,
675 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
676 { .name = "mlock", .errmsg = true,
677 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
678 { .name = "mlockall", .errmsg = true,
679 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
680 { .name = "mmap", .hexret = true,
681 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
682 [2] = SCA_MMAP_PROT, /* prot */
683 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
684 { .name = "mprotect", .errmsg = true,
685 .arg_scnprintf = { [0] = SCA_HEX, /* start */
686 [2] = SCA_MMAP_PROT, /* prot */ }, },
687 { .name = "mq_unlink", .errmsg = true,
688 .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
689 { .name = "mremap", .hexret = true,
690 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
691 [3] = SCA_MREMAP_FLAGS, /* flags */
692 [4] = SCA_HEX, /* new_addr */ }, },
693 { .name = "munlock", .errmsg = true,
694 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
695 { .name = "munmap", .errmsg = true,
696 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
697 { .name = "name_to_handle_at", .errmsg = true,
698 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
699 { .name = "newfstatat", .errmsg = true,
700 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
701 { .name = "open", .errmsg = true,
702 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
703 { .name = "open_by_handle_at", .errmsg = true,
704 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
705 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
706 { .name = "openat", .errmsg = true,
707 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
708 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
709 { .name = "perf_event_open", .errmsg = true,
710 .arg_scnprintf = { [2] = SCA_INT, /* cpu */
711 [3] = SCA_FD, /* group_fd */
712 [4] = SCA_PERF_FLAGS, /* flags */ }, },
713 { .name = "pipe2", .errmsg = true,
714 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
715 { .name = "poll", .errmsg = true, .timeout = true, },
716 { .name = "ppoll", .errmsg = true, .timeout = true, },
717 { .name = "pread", .errmsg = true, .alias = "pread64", },
718 { .name = "preadv", .errmsg = true, .alias = "pread", },
719 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
720 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
721 { .name = "pwritev", .errmsg = true, },
722 { .name = "read", .errmsg = true, },
723 { .name = "readlink", .errmsg = true, },
724 { .name = "readlinkat", .errmsg = true,
725 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
726 { .name = "readv", .errmsg = true, },
727 { .name = "recvfrom", .errmsg = true,
728 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
729 { .name = "recvmmsg", .errmsg = true,
730 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
731 { .name = "recvmsg", .errmsg = true,
732 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
733 { .name = "removexattr", .errmsg = true, },
734 { .name = "renameat", .errmsg = true,
735 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
736 { .name = "rmdir", .errmsg = true, },
737 { .name = "rt_sigaction", .errmsg = true,
738 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
739 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
740 { .name = "rt_sigqueueinfo", .errmsg = true,
741 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
742 { .name = "rt_tgsigqueueinfo", .errmsg = true,
743 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
744 { .name = "sched_getattr", .errmsg = true, },
745 { .name = "sched_setattr", .errmsg = true, },
746 { .name = "sched_setscheduler", .errmsg = true,
747 .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
748 { .name = "seccomp", .errmsg = true,
749 .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
750 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
751 { .name = "select", .errmsg = true, .timeout = true, },
752 { .name = "sendmmsg", .errmsg = true,
753 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
754 { .name = "sendmsg", .errmsg = true,
755 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
756 { .name = "sendto", .errmsg = true,
757 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
758 { .name = "set_tid_address", .errpid = true, },
759 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
760 { .name = "setpgid", .errmsg = true, },
761 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
762 { .name = "setxattr", .errmsg = true, },
763 { .name = "shutdown", .errmsg = true, },
764 { .name = "socket", .errmsg = true,
765 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
766 [1] = SCA_SK_TYPE, /* type */ },
767 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
768 { .name = "socketpair", .errmsg = true,
769 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
770 [1] = SCA_SK_TYPE, /* type */ },
771 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
772 { .name = "stat", .errmsg = true, .alias = "newstat", },
773 { .name = "statfs", .errmsg = true, },
774 { .name = "swapoff", .errmsg = true,
775 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
776 { .name = "swapon", .errmsg = true,
777 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
778 { .name = "symlinkat", .errmsg = true,
779 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
780 { .name = "tgkill", .errmsg = true,
781 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
782 { .name = "tkill", .errmsg = true,
783 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
784 { .name = "truncate", .errmsg = true, },
785 { .name = "uname", .errmsg = true, .alias = "newuname", },
786 { .name = "unlinkat", .errmsg = true,
787 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
788 { .name = "utime", .errmsg = true, },
789 { .name = "utimensat", .errmsg = true,
790 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
791 { .name = "utimes", .errmsg = true, },
792 { .name = "vmsplice", .errmsg = true, },
793 { .name = "wait4", .errpid = true,
794 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
795 { .name = "waitid", .errpid = true,
796 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
797 { .name = "write", .errmsg = true, },
798 { .name = "writev", .errmsg = true, },
801 static int syscall_fmt__cmp(const void *name, const void *fmtp)
803 const struct syscall_fmt *fmt = fmtp;
804 return strcmp(name, fmt->name);
807 static struct syscall_fmt *syscall_fmt__find(const char *name)
809 const int nmemb = ARRAY_SIZE(syscall_fmts);
810 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
814 struct event_format *tp_format;
816 struct format_field *args;
819 struct syscall_fmt *fmt;
820 size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
824 static size_t fprintf_duration(unsigned long t, FILE *fp)
826 double duration = (double)t / NSEC_PER_MSEC;
827 size_t printed = fprintf(fp, "(");
830 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
831 else if (duration >= 0.01)
832 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
834 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
835 return printed + fprintf(fp, "): ");
839 * filename.ptr: The filename char pointer that will be vfs_getname'd
840 * filename.entry_str_pos: Where to insert the string translated from
841 * filename.ptr by the vfs_getname tracepoint/kprobe.
843 struct thread_trace {
846 unsigned long nr_events;
847 unsigned long pfmaj, pfmin;
852 short int entry_str_pos;
854 unsigned int namelen;
862 struct intlist *syscall_stats;
865 static struct thread_trace *thread_trace__new(void)
867 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
870 ttrace->paths.max = -1;
872 ttrace->syscall_stats = intlist__new(NULL);
877 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
879 struct thread_trace *ttrace;
884 if (thread__priv(thread) == NULL)
885 thread__set_priv(thread, thread_trace__new());
887 if (thread__priv(thread) == NULL)
890 ttrace = thread__priv(thread);
895 color_fprintf(fp, PERF_COLOR_RED,
896 "WARNING: not enough memory, dropping samples!\n");
900 #define TRACE_PFMAJ (1 << 0)
901 #define TRACE_PFMIN (1 << 1)
903 static const size_t trace__entry_str_size = 2048;
905 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
907 struct thread_trace *ttrace = thread__priv(thread);
909 if (fd > ttrace->paths.max) {
910 char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
915 if (ttrace->paths.max != -1) {
916 memset(npath + ttrace->paths.max + 1, 0,
917 (fd - ttrace->paths.max) * sizeof(char *));
919 memset(npath, 0, (fd + 1) * sizeof(char *));
922 ttrace->paths.table = npath;
923 ttrace->paths.max = fd;
926 ttrace->paths.table[fd] = strdup(pathname);
928 return ttrace->paths.table[fd] != NULL ? 0 : -1;
931 static int thread__read_fd_path(struct thread *thread, int fd)
933 char linkname[PATH_MAX], pathname[PATH_MAX];
937 if (thread->pid_ == thread->tid) {
938 scnprintf(linkname, sizeof(linkname),
939 "/proc/%d/fd/%d", thread->pid_, fd);
941 scnprintf(linkname, sizeof(linkname),
942 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
945 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
948 ret = readlink(linkname, pathname, sizeof(pathname));
950 if (ret < 0 || ret > st.st_size)
953 pathname[ret] = '\0';
954 return trace__set_fd_pathname(thread, fd, pathname);
957 static const char *thread__fd_path(struct thread *thread, int fd,
960 struct thread_trace *ttrace = thread__priv(thread);
968 if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
971 ++trace->stats.proc_getname;
972 if (thread__read_fd_path(thread, fd))
976 return ttrace->paths.table[fd];
979 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
980 struct syscall_arg *arg)
983 size_t printed = scnprintf(bf, size, "%d", fd);
984 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
987 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
992 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
993 struct syscall_arg *arg)
996 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
997 struct thread_trace *ttrace = thread__priv(arg->thread);
999 if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1000 zfree(&ttrace->paths.table[fd]);
1005 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1008 struct thread_trace *ttrace = thread__priv(thread);
1010 ttrace->filename.ptr = ptr;
1011 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
1014 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1015 struct syscall_arg *arg)
1017 unsigned long ptr = arg->val;
1019 if (!arg->trace->vfs_getname)
1020 return scnprintf(bf, size, "%#x", ptr);
1022 thread__set_filename_pos(arg->thread, bf, ptr);
1026 static bool trace__filter_duration(struct trace *trace, double t)
1028 return t < (trace->duration_filter * NSEC_PER_MSEC);
1031 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1033 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1035 return fprintf(fp, "%10.3f ", ts);
static bool done = false;
static bool interrupted = false;

/*
 * Signal handler: request a clean shutdown; remember whether it was an
 * interactive interrupt (SIGINT) so the summary can be suppressed/shown
 * appropriately.
 */
static void sig_handler(int sig)
{
	done = true;
	interrupted = sig == SIGINT;
}
1047 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1048 u64 duration, u64 tstamp, FILE *fp)
1050 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1051 printed += fprintf_duration(duration, fp);
1053 if (trace->multiple_threads) {
1054 if (trace->show_comm)
1055 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1056 printed += fprintf(fp, "%d ", thread->tid);
1062 static int trace__process_event(struct trace *trace, struct machine *machine,
1063 union perf_event *event, struct perf_sample *sample)
1067 switch (event->header.type) {
1068 case PERF_RECORD_LOST:
1069 color_fprintf(trace->output, PERF_COLOR_RED,
1070 "LOST %" PRIu64 " events!\n", event->lost.lost);
1071 ret = machine__process_lost_event(machine, event, sample);
1074 ret = machine__process_event(machine, event, sample);
1081 static int trace__tool_process(struct perf_tool *tool,
1082 union perf_event *event,
1083 struct perf_sample *sample,
1084 struct machine *machine)
1086 struct trace *trace = container_of(tool, struct trace, tool);
1087 return trace__process_event(trace, machine, event, sample);
1090 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1092 struct machine *machine = vmachine;
1094 if (machine->kptr_restrict_warned)
1097 if (symbol_conf.kptr_restrict) {
1098 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1099 "Check /proc/sys/kernel/kptr_restrict.\n\n"
1100 "Kernel samples will not be resolved.\n");
1101 machine->kptr_restrict_warned = true;
1105 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1108 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1110 int err = symbol__init(NULL);
1115 trace->host = machine__new_host();
1116 if (trace->host == NULL)
1119 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
1122 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1123 evlist->threads, trace__tool_process, false,
1124 trace->opts.proc_map_timeout);
/*
 * Pick a pretty-printer for each argument of a syscall, preferring the
 * hand-written table entry (sc->fmt), then heuristics keyed off the
 * tracepoint field's type/name (filenames, pointers, pid_t, umode_t, fds).
 */
static int syscall__set_arg_fmts(struct syscall *sc)
	struct format_field *field;

	sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
	if (sc->arg_scnprintf == NULL)

	sc->arg_parm = sc->fmt->arg_parm;

	for (field = sc->args; field; field = field->next) {
		/* Explicit per-syscall formatter table wins. */
		if (sc->fmt && sc->fmt->arg_scnprintf[idx])
			sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
		else if (strcmp(field->type, "const char *") == 0 &&
			 (strcmp(field->name, "filename") == 0 ||
			  strcmp(field->name, "path") == 0 ||
			  strcmp(field->name, "pathname") == 0))
			sc->arg_scnprintf[idx] = SCA_FILENAME;
		else if (field->flags & FIELD_IS_POINTER)
			sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
		else if (strcmp(field->type, "pid_t") == 0)
			sc->arg_scnprintf[idx] = SCA_PID;
		else if (strcmp(field->type, "umode_t") == 0)
			sc->arg_scnprintf[idx] = SCA_MODE_T;
		else if ((strcmp(field->type, "int") == 0 ||
			  strcmp(field->type, "unsigned int") == 0 ||
			  strcmp(field->type, "long") == 0) &&
			 (len = strlen(field->name)) >= 2 &&
			 strcmp(field->name + len - 2, "fd") == 0) {
			/*
			 * Integer fields whose names end in "fd", per:
			 * /sys/kernel/tracing/events/syscalls/sys_enter*
			 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
			 */
			sc->arg_scnprintf[idx] = SCA_FD;
/*
 * Lazily populate trace->syscalls.table[id]: grow the table if needed,
 * look up the syscall name and its "syscalls:sys_enter_<name>" tracepoint
 * format (falling back to a known alias), and cache the argument fields.
 */
static int trace__read_syscall_info(struct trace *trace, int id)
	const char *name = syscalltbl__name(trace->sctbl, id);

	if (id > trace->syscalls.max) {
		/* Grow the table to hold ids 0..id inclusive. */
		struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));

		if (nsyscalls == NULL)

		if (trace->syscalls.max != -1) {
			/* Zero only the newly-added tail entries. */
			memset(nsyscalls + trace->syscalls.max + 1, 0,
			       (id - trace->syscalls.max) * sizeof(*sc));
			/* First allocation: zero the whole table. */
			memset(nsyscalls, 0, (id + 1) * sizeof(*sc));

		trace->syscalls.table = nsyscalls;
		trace->syscalls.max = id;

	sc = trace->syscalls.table + id;
	sc->fmt = syscall_fmt__find(sc->name);

	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
	sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	/* Some syscalls are registered under an alias (e.g. "mmap" vs arch name). */
	if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
		snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
		sc->tp_format = trace_event__tp_format("syscalls", tp_name);

	if (IS_ERR(sc->tp_format))

	sc->args = sc->tp_format->format.fields;
	sc->nr_args = sc->tp_format->format.nr_fields;
	/*
	 * We need to check and discard the first variable '__syscall_nr'
	 * or 'nr' that means the syscall number. It is needless here.
	 * So drop the '__syscall_nr' or 'nr' field; it does not exist on
	 * older kernels.
	 */
	if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
		sc->args = sc->args->next;

	/* exit()/exit_group() never return, so no sys_exit pairing is printed. */
	sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");

	return syscall__set_arg_fmts(sc);
/*
 * Translate the -e/--expr syscall-name qualifier list into an array of
 * syscall ids (trace->ev_qualifier_ids), reporting every name that does
 * not resolve and failing if any was invalid.
 */
static int trace__validate_ev_qualifier(struct trace *trace)
	struct str_node *pos;

	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
						 sizeof(trace->ev_qualifier_ids.entries[0]));

	if (trace->ev_qualifier_ids.entries == NULL) {
		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",

	strlist__for_each_entry(pos, trace->ev_qualifier) {
		const char *sc = pos->s;
		int id = syscalltbl__id(trace->sctbl, sc);

			/* Unknown name: report it but keep scanning for more. */
			fputs("Error:\tInvalid syscall ", trace->output);
			fputs(", ", trace->output);
			fputs(sc, trace->output);

		trace->ev_qualifier_ids.entries[i++] = id;

	/* At least one bad name: give hints and discard the partial list. */
	fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
	      "\nHint:\tand: 'man syscalls'\n", trace->output);
	zfree(&trace->ev_qualifier_ids.entries);
	trace->ev_qualifier_ids.nr = 0;
/*
 * args is to be interpreted as a sequence of longs but we need to handle
 * 8-byte unaligned accesses. args points to raw_data within the event
 * and raw_data is guaranteed to be 8-byte unaligned because it is
 * preceded by raw_size which is a u32. So we need to copy args to a temp
 * variable to read it. Most notably this avoids extended load instructions
 * on unaligned addresses.
 */
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
				      unsigned char *args, struct trace *trace,
				      struct thread *thread)
	if (sc->args != NULL) {
		struct format_field *field;

		struct syscall_arg arg = {

		for (field = sc->args; field;
		     field = field->next, ++arg.idx, bit <<= 1) {

			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * arg.idx;
			memcpy(&val, p, sizeof(val));
			/*
			 * Suppress this argument if its value is zero and
			 * we don't have a string associated in an strarray
			 * for it.
			 */
			    !(sc->arg_scnprintf &&
			      sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
			      sc->arg_parm[arg.idx]))

			printed += scnprintf(bf + printed, size - printed,
					     "%s%s: ", printed ? ", " : "", field->name);
			if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
				/* Dedicated formatter (fd, filename, flags, ...). */
				arg.parm = sc->arg_parm[arg.idx];
				printed += sc->arg_scnprintf[arg.idx](bf + printed,
								      size - printed, &arg);
				/* Fallback: raw numeric value. */
				printed += scnprintf(bf + printed, size - printed,
	} else if (IS_ERR(sc->tp_format)) {
		/*
		 * If we managed to read the tracepoint /format file, then we
		 * may end up not having any args, like with gettid(), so only
		 * print the raw args when we didn't manage to read it.
		 */
			/* special care for unaligned accesses */
			p = args + sizeof(unsigned long) * i;
			memcpy(&val, p, sizeof(val));
			printed += scnprintf(bf + printed, size - printed,
					     printed ? ", " : "", i, val);
/* Signature shared by all sample handlers installed via evsel->handler. */
typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
				  union perf_event *event,
				  struct perf_sample *sample);
/*
 * Return the cached syscall descriptor for 'id', reading it on first use.
 * Returns NULL (after printing a diagnostic) for invalid or unreadable ids.
 */
static struct syscall *trace__syscall_info(struct trace *trace,
					   struct perf_evsel *evsel, int id)
		/*
		 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
		 * before that, leaving at a higher verbosity level till that is
		 * explained. Reproduced with plain ftrace with:
		 *
		 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
		 * grep "NR -1 " /t/trace_pipe
		 *
		 * After generating some load on the machine.
		 */
			fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
				id, perf_evsel__name(evsel), ++n);

	/* Not yet cached: read name/tracepoint format for this id. */
	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
	    trace__read_syscall_info(trace, id))

	if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))

	return &trace->syscalls.table[id];

	fprintf(trace->output, "Problems reading syscall %d", id);
	if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
		fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
	fputs(" information\n", trace->output);
/*
 * Accumulate per-thread, per-syscall duration statistics (for --summary),
 * keyed by syscall id in ttrace->syscall_stats; allocates the stats node
 * lazily on first sight of a given id.
 */
static void thread__update_stats(struct thread_trace *ttrace,
				 int id, struct perf_sample *sample)
	struct int_node *inode;
	struct stats *stats;

	inode = intlist__findnew(ttrace->syscall_stats, id);

	stats = inode->priv;
	if (stats == NULL) {
		stats = malloc(sizeof(struct stats));
		inode->priv = stats;

	/* Guard against clock skew: only count forward-moving timestamps. */
	if (ttrace->entry_time && sample->time > ttrace->entry_time)
		duration = sample->time - ttrace->entry_time;

	update_stats(stats, duration);
/*
 * If the current thread has a sys_enter printed but not yet matched by a
 * sys_exit, flush it as "<syscall>(...) ..." so the interleaving event
 * that interrupted it can be printed on its own line.
 */
static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
	struct thread_trace *ttrace;

	if (trace->current == NULL)

	ttrace = thread__priv(trace->current);

	if (!ttrace->entry_pending)

	duration = sample->time - ttrace->entry_time;

	printed  = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
	printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
	ttrace->entry_pending = false;
/*
 * raw_syscalls:sys_enter handler: format the syscall name and arguments
 * into the per-thread entry_str. The line is emitted immediately for
 * syscalls that never return, otherwise kept pending for sys_exit to
 * complete it with the return value.
 */
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample)
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);

	args = perf_evsel__sc_tp_ptr(evsel, args, sample);

	/* Lazily allocate the scratch buffer used to build the entry line. */
	if (ttrace->entry_str == NULL) {
		ttrace->entry_str = malloc(trace__entry_str_size);
		if (!ttrace->entry_str)

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
		trace__printf_interrupted_entry(trace, sample);

	ttrace->entry_time = sample->time;
	msg = ttrace->entry_str;
	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);

	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
					   args, trace, thread);

	if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
		/* No exit will come (e.g. exit_group): print the line now. */
		trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
		fprintf(trace->output, "%-70s)\n", ttrace->entry_str);

		/* Defer printing until sys_exit supplies the return value. */
		ttrace->entry_pending = true;
		/* See trace__vfs_getname & trace__sys_exit */
		ttrace->filename.pending_open = false;

	if (trace->current != thread) {
		thread__put(trace->current);
		trace->current = thread__get(thread);

	thread__put(thread);
/*
 * Resolve the sample's callchain into 'cursor', bounded by --max-stack.
 * Returns non-zero when the sample's thread/addresses can't be resolved.
 */
static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
				    struct perf_sample *sample,
				    struct callchain_cursor *cursor)
	struct addr_location al;

	if (machine__resolve(trace->host, &al, sample) < 0 ||
	    thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
/*
 * Print the previously-resolved callchain (in the global callchain_cursor)
 * to trace->output, indented 38 columns to line up under the event line.
 */
static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
	/* TODO: user-configurable print_opts */
	const unsigned int print_opts = EVSEL__PRINT_SYM |
					EVSEL__PRINT_UNKNOWN_AS_ADDR;

	return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
/*
 * raw_syscalls:sys_exit handler: complete the pending entry line (or print
 * a "continued" marker if another thread's output intervened), format the
 * return value per the syscall's fmt (errno name, pid, hex, timeout), and
 * optionally append the callchain. Also feeds --summary statistics and the
 * fd->pathname cache via the vfs_getname cooperation.
 */
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
			   union perf_event *event __maybe_unused,
			   struct perf_sample *sample)
	struct thread *thread;
	int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
	struct syscall *sc = trace__syscall_info(trace, evsel, id);
	struct thread_trace *ttrace;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	ttrace = thread__trace(thread, trace->output);

	thread__update_stats(ttrace, id, sample);

	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);

	/* Successful open(): associate the getname'd path with the new fd. */
	if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
		ttrace->filename.pending_open = false;
		++trace->stats.vfs_getname;

	if (ttrace->entry_time) {
		duration = sample->time - ttrace->entry_time;
		if (trace__filter_duration(trace, duration))
	} else if (trace->duration_filter)

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			/* Too shallow for --min-stack: skip printing entirely. */
			if (callchain_cursor.nr < trace->min_stack)

	if (trace->summary_only)

	trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);

	if (ttrace->entry_pending) {
		fprintf(trace->output, "%-70s", ttrace->entry_str);
		/* Entry already flushed by an interleaving event. */
		fprintf(trace->output, " ... [");
		color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
		fprintf(trace->output, "]: %s()", sc->name);

	if (sc->fmt == NULL) {
		fprintf(trace->output, ") = %ld", ret);
	} else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
		char bf[STRERR_BUFSIZE];
		const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
			   *e = audit_errno_to_name(-ret);

		fprintf(trace->output, ") = -1 %s %s", e, emsg);
	} else if (ret == 0 && sc->fmt->timeout)
		fprintf(trace->output, ") = 0 Timeout");
	else if (sc->fmt->hexret)
		fprintf(trace->output, ") = %#lx", ret);
	else if (sc->fmt->errpid) {
		/* Return value is a pid (e.g. fork/clone): show its comm. */
		struct thread *child = machine__find_thread(trace->host, ret, ret);

		if (child != NULL) {
			fprintf(trace->output, ") = %ld", ret);
			if (child->comm_set)
				fprintf(trace->output, " (%s)", thread__comm_str(child));

	fputc('\n', trace->output);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));

	ttrace->entry_pending = false;

	thread__put(thread);
/*
 * probe:vfs_getname handler: capture the pathname being resolved and both
 * cache it (for fd->path decoration at sys_exit) and splice it into the
 * pending sys_enter line at the recorded position, truncating from the
 * left if it doesn't fit.
 */
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample)
	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	struct thread_trace *ttrace;
	size_t filename_len, entry_str_len, to_move;
	ssize_t remaining_space;

	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");

	ttrace = thread__priv(thread);

	filename_len = strlen(filename);
	if (filename_len == 0)

	/* Grow the per-thread name buffer as needed (len + NUL). */
	if (ttrace->filename.namelen < filename_len) {
		char *f = realloc(ttrace->filename.name, filename_len + 1);

		ttrace->filename.namelen = filename_len;
		ttrace->filename.name = f;

	strcpy(ttrace->filename.name, filename);
	ttrace->filename.pending_open = true;

	/* No pending SCA_FILENAME placeholder in entry_str: nothing to splice. */
	if (!ttrace->filename.ptr)

	entry_str_len = strlen(ttrace->entry_str);
	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
	if (remaining_space <= 0)

	if (filename_len > (size_t)remaining_space) {
		/* Keep the (more informative) tail of the path. */
		filename += filename_len - remaining_space;
		filename_len = remaining_space;

	/* Shift the tail of entry_str right and drop the filename in. */
	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
	memmove(pos + filename_len, pos, to_move);
	memcpy(pos, filename, filename_len);

	ttrace->filename.ptr = 0;
	ttrace->filename.entry_str_pos = 0;

	thread__put(thread);
/*
 * sched:sched_stat_runtime handler: accumulate on-CPU time (ms) per thread
 * and globally, for the --summary report.
 */
static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample)
	u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
	double runtime_ms = (double)runtime / NSEC_PER_MSEC;
	struct thread *thread = machine__findnew_thread(trace->host,
	struct thread_trace *ttrace = thread__trace(thread, trace->output);

	ttrace->runtime_ms += runtime_ms;
	trace->runtime_ms += runtime_ms;

	thread__put(thread);

	/* Fallback path: dump the raw tracepoint payload. */
	fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
		perf_evsel__strval(evsel, sample, "comm"),
		(pid_t)perf_evsel__intval(evsel, sample, "pid"),
		perf_evsel__intval(evsel, sample, "vruntime"));
/*
 * print_binary() callback for BPF output events: emit printable bytes
 * as-is and a '.' placeholder otherwise; all layout ops are ignored.
 */
static void bpf_output__printer(enum binary_printer_ops op,
				unsigned int val, void *extra)
	FILE *output = extra;
	unsigned char ch = (unsigned char)val;

	case BINARY_PRINT_CHAR_DATA:
		fprintf(output, "%c", isprint(ch) ? ch : '.');
	/* Layout-only operations: intentionally no output. */
	case BINARY_PRINT_DATA_BEGIN:
	case BINARY_PRINT_LINE_BEGIN:
	case BINARY_PRINT_ADDR:
	case BINARY_PRINT_NUM_DATA:
	case BINARY_PRINT_NUM_PAD:
	case BINARY_PRINT_SEP:
	case BINARY_PRINT_CHAR_PAD:
	case BINARY_PRINT_LINE_END:
	case BINARY_PRINT_DATA_END:
1755 static void bpf_output__fprintf(struct trace *trace,
1756 struct perf_sample *sample)
1758 print_binary(sample->raw_data, sample->raw_size, 8,
1759 bpf_output__printer, trace->output);
/*
 * Generic handler for --event tracepoints and BPF output events: print a
 * timestamped "<event>: <fields>" line (after flushing any interrupted
 * syscall entry) plus an optional callchain.
 */
static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample)
	int callchain_ret = 0;

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)

	trace__printf_interrupted_entry(trace, sample);
	trace__fprintf_tstamp(trace, sample->time, trace->output);

	/* Keep columns aligned with the syscall lines. */
	if (trace->trace_syscalls)
		fprintf(trace->output, "(         ): ");

	fprintf(trace->output, "%s:", evsel->name);

	if (perf_evsel__is_bpf_output(evsel)) {
		bpf_output__fprintf(trace, sample);
	} else if (evsel->tp_format) {
		event_format__fprintf(evsel->tp_format, sample->cpu,
				      sample->raw_data, sample->raw_size,

	fprintf(trace->output, ")\n");

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
/*
 * Print a resolved address location for page-fault lines: "dso@" and/or
 * "sym+0xoff" when requested (or in verbose mode), falling back to the
 * raw address when no symbol/map is available.
 */
static void print_location(FILE *f, struct perf_sample *sample,
			   struct addr_location *al,
			   bool print_dso, bool print_sym)
	if ((verbose > 0 || print_dso) && al->map)
		fprintf(f, "%s@", al->map->dso->long_name);

	if ((verbose > 0 || print_sym) && al->sym)
		fprintf(f, "%s+0x%" PRIx64, al->sym->name,
			al->addr - al->sym->start);
		fprintf(f, "0x%" PRIx64, al->addr);
		fprintf(f, "0x%" PRIx64, sample->addr);
/*
 * Page-fault software event handler (--pf maj/min/all): print
 * "<maj|min>fault [<ip location>] => <data location> (<type><level>)",
 * resolving the faulting address first as data, then as code.
 */
static int trace__pgfault(struct trace *trace,
			  struct perf_evsel *evsel,
			  union perf_event *event __maybe_unused,
			  struct perf_sample *sample)
	struct thread *thread;
	struct addr_location al;
	char map_type = 'd';		/* 'd'ata unless resolved as code below */
	struct thread_trace *ttrace;

	int callchain_ret = 0;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);

	if (sample->callchain) {
		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
		if (callchain_ret == 0) {
			if (callchain_cursor.nr < trace->min_stack)

	ttrace = thread__trace(thread, trace->output);

	if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)

	if (trace->summary_only)

	/* Where the faulting instruction lives. */
	thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,

	trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);

	fprintf(trace->output, "%sfault [",
		evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?

	print_location(trace->output, sample, &al, false, true);

	fprintf(trace->output, "] => ");

	/* The faulted address: try data maps first, then executable maps. */
	thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,

		thread__find_addr_location(thread, sample->cpumode,
					   MAP__FUNCTION, sample->addr, &al);

	print_location(trace->output, sample, &al, true, false);

	fprintf(trace->output, " (%c%c)\n", map_type, al.level);

	if (callchain_ret > 0)
		trace__fprintf_callchain(trace, sample);
	else if (callchain_ret < 0)
		pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));

	thread__put(thread);
/*
 * Record the first usable timestamp as the relative-time origin for the
 * printed trace (unless --time/full_time absolute timestamps were asked).
 */
static void trace__set_base_time(struct trace *trace,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample)
	/*
	 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
	 * and don't use sample->time unconditionally, we may end up having
	 * some other event in the future without PERF_SAMPLE_TIME for good
	 * reason, i.e. we may not be interested in its timestamps, just in
	 * it taking place, picking some piece of information when it
	 * appears in our event stream (vfs_getname comes to mind).
	 */
	if (trace->base_time == 0 && !trace->full_time &&
	    (evsel->attr.sample_type & PERF_SAMPLE_TIME))
		trace->base_time = sample->time;
/*
 * perf_tool sample callback used in replay (perf.data) mode: skip filtered
 * threads, set the time base, then dispatch to the evsel's handler.
 */
static int trace__process_sample(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct machine *machine __maybe_unused)
	struct trace *trace = container_of(tool, struct trace, tool);
	struct thread *thread;

	tracepoint_handler handler = evsel->handler;

	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
	if (thread && thread__is_filtered(thread))

	trace__set_base_time(trace, evsel, sample);

	handler(trace, evsel, event, sample);

	thread__put(thread);
/*
 * 'perf trace record': build an argv for cmd_record() with the syscall
 * raw tracepoints (falling back to the pre-raw_syscalls names on older
 * kernels) plus optional page-fault events, then delegate to perf record.
 */
static int trace__record(struct trace *trace, int argc, const char **argv)
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {

	const char * const sc_args[] = { "-e", };
	unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
	const char * const majpf_args[] = { "-e", "major-faults" };
	unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
	const char * const minpf_args[] = { "-e", "minor-faults" };
	unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);

	/* +1 is for the event string below */
	rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
		   majpf_args_nr + minpf_args_nr + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[j++] = record_args[i];

	if (trace->trace_syscalls) {
		for (i = 0; i < sc_args_nr; i++)
			rec_argv[j++] = sc_args[i];

		/* event string may be different for older kernels - e.g., RHEL6 */
		if (is_valid_tracepoint("raw_syscalls:sys_enter"))
			rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
		else if (is_valid_tracepoint("syscalls:sys_enter"))
			rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
			pr_err("Neither raw_syscalls nor syscalls events exist.\n");

	if (trace->trace_pgfaults & TRACE_PFMAJ)
		for (i = 0; i < majpf_args_nr; i++)
			rec_argv[j++] = majpf_args[i];

	if (trace->trace_pgfaults & TRACE_PFMIN)
		for (i = 0; i < minpf_args_nr; i++)
			rec_argv[j++] = minpf_args[i];

	/* User-supplied trailing arguments (workload, extra options). */
	for (i = 0; i < (unsigned int)argc; i++)
		rec_argv[j++] = argv[i];

	return cmd_record(j, rec_argv);
/* Forward declaration: summary printer, defined after the resort helpers. */
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
/*
 * Try to add the probe:vfs_getname tracepoint (set up beforehand with
 * 'perf probe'); returns false if the probe or its "pathname" field is
 * missing, in which case fd->path decoration is simply unavailable.
 */
static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");

	if (perf_evsel__field(evsel, "pathname") == NULL) {
		perf_evsel__delete(evsel);

	evsel->handler = trace__vfs_getname;
	perf_evlist__add(evlist, evsel);
/*
 * Create a software page-fault evsel (config selects MAJ or MIN faults)
 * sampling every fault, with trace__pgfault() as its handler.
 */
static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
	struct perf_evsel *evsel;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,

	attr.config = config;
	attr.sample_period = 1;	/* every single fault */

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
		evsel->handler = trace__pgfault;
/*
 * Live-mode dispatcher: route non-sample records to the generic machinery,
 * map samples back to their evsel by id, sanity-check tracepoint payloads
 * and invoke the per-evsel handler.
 */
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
	const u32 type = event->header.type;
	struct perf_evsel *evsel;

	if (type != PERF_RECORD_SAMPLE) {
		trace__process_event(trace, trace->host, event, sample);

	evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
	if (evsel == NULL) {
		fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);

	trace__set_base_time(trace, evsel, sample);

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
	    sample->raw_data == NULL) {
		/* Corrupt/empty tracepoint payload: report and skip. */
		fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
			perf_evsel__name(evsel), sample->tid,
			sample->cpu, sample->raw_size);
		tracepoint_handler handler = evsel->handler;
		handler(trace, evsel, event, sample);
/*
 * Create and add the raw_syscalls:sys_enter/sys_exit tracepoint evsels,
 * wiring their handlers and caching the 'args'/'ret' field offsets.
 * On any failure the already-created evsels are torn down.
 */
static int trace__add_syscall_newtp(struct trace *trace)
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *sys_enter, *sys_exit;

	sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
	if (sys_enter == NULL)

	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
		goto out_delete_sys_enter;

	sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
	if (sys_exit == NULL)
		goto out_delete_sys_enter;

	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
		goto out_delete_sys_exit;

	perf_evlist__add(evlist, sys_enter);
	perf_evlist__add(evlist, sys_exit);

	if (callchain_param.enabled && !trace->kernel_syscallchains) {
		/*
		 * We're interested only in the user space callchain
		 * leading to the syscall, allow overriding that for
		 * debugging reasons using --kernel_syscall_callchains
		 */
		sys_exit->attr.exclude_callchain_kernel = 1;

	trace->syscalls.events.sys_enter = sys_enter;
	trace->syscalls.events.sys_exit  = sys_exit;

out_delete_sys_exit:
	perf_evsel__delete_priv(sys_exit);
out_delete_sys_enter:
	perf_evsel__delete_priv(sys_enter);
/*
 * Turn the validated qualifier id list into an "id in (...)" (or negated)
 * tracepoint filter and append it to both sys_enter and sys_exit evsels.
 */
static int trace__set_ev_qualifier_filter(struct trace *trace)
	struct perf_evsel *sys_exit;
	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
						trace->ev_qualifier_ids.nr,
						trace->ev_qualifier_ids.entries);

	if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
		sys_exit = trace->syscalls.events.sys_exit;
		err = perf_evsel__append_tp_filter(sys_exit, filter);
/*
 * Live tracing main loop: build the evlist (syscall tracepoints,
 * vfs_getname, page faults, sched_stat_runtime), configure callchains and
 * filters, fork/attach the workload, then poll the mmap ring buffers and
 * dispatch events until the workload exits or the user interrupts.
 */
static int trace__run(struct trace *trace, int argc, const char **argv)
	struct perf_evlist *evlist = trace->evlist;
	struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;

	unsigned long before;
	const bool forks = argc > 0;
	bool draining = false;

	/* --- evlist construction --------------------------------------- */
	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
		goto out_error_raw_syscalls;

	if (trace->trace_syscalls)
		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);

	if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
		pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
		if (pgfault_maj == NULL)
		perf_evlist__add(evlist, pgfault_maj);

	if ((trace->trace_pgfaults & TRACE_PFMIN)) {
		pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
		if (pgfault_min == NULL)
		perf_evlist__add(evlist, pgfault_min);

	    perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
				   trace__sched_stat_runtime))
		goto out_error_sched_stat_runtime;

	err = perf_evlist__create_maps(evlist, &trace->opts.target);
		fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
		goto out_delete_evlist;

	err = trace__symbols_init(trace, evlist);
		fprintf(trace->output, "Problems initializing symbol libraries!\n");
		goto out_delete_evlist;

	perf_evlist__config(evlist, &trace->opts, NULL);

	/* --- callchain configuration ----------------------------------- */
	if (callchain_param.enabled) {
		bool use_identifier = false;

		if (trace->syscalls.events.sys_exit) {
			perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
						     &trace->opts, &callchain_param);
			use_identifier = true;

			perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
			use_identifier = true;

			perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
			use_identifier = true;

		if (use_identifier) {
			/*
			 * Now we have evsels with different sample_ids, use
			 * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
			 * from a fixed position in each ring buffer record.
			 *
			 * As of this the changeset introducing this comment, this
			 * isn't strictly needed, as the fields that can come before
			 * PERF_SAMPLE_ID are all used, but we'll probably disable
			 * some of those for things like copying the payload of
			 * pointer syscall arguments, and for vfs_getname we don't
			 * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
			 * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
			 */
			perf_evlist__set_sample_bit(evlist, IDENTIFIER);
			perf_evlist__reset_sample_bit(evlist, ID);

	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	/* --- workload / counters startup ------------------------------- */
		err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
			fprintf(trace->output, "Couldn't run the workload!\n");
			goto out_delete_evlist;

	err = perf_evlist__open(evlist);
		goto out_error_open;

	err = bpf__apply_obj_config();
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
		goto out_error_open;

	/*
	 * Better not use !target__has_task() here because we need to cover the
	 * case where no threads were specified in the command line, but a
	 * workload was, and in that case we will fill in the thread_map when
	 * we fork the workload in perf_evlist__prepare_workload.
	 */
	if (trace->filter_pids.nr > 0)
		err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
	else if (thread_map__pid(evlist->threads, 0) == -1)
		err = perf_evlist__set_filter_pid(evlist, getpid());

	if (trace->ev_qualifier_ids.nr > 0) {
		err = trace__set_ev_qualifier_filter(trace);

		pr_debug("event qualifier tracepoint filter: %s\n",
			 trace->syscalls.events.sys_exit->filter);

	err = perf_evlist__apply_filters(evlist, &evsel);
		goto out_error_apply_filters;

	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
		goto out_error_mmap;

	if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
		perf_evlist__enable(evlist);

		perf_evlist__start_workload(evlist);

	if (trace->opts.initial_delay) {
		usleep(trace->opts.initial_delay * 1000);
		perf_evlist__enable(evlist);

	/* tid column is needed when more than one thread may appear. */
	trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
				  evlist->threads->nr > 1 ||
				  perf_evlist__first(evlist)->attr.inherit;

	/* --- event loop ------------------------------------------------ */
	before = trace->nr_events;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			struct perf_sample sample;

			err = perf_evlist__parse_sample(evlist, event, &sample);
				fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);

			trace__handle_event(trace, event, &sample);

			perf_evlist__mmap_consume(evlist, i);

			/* Workload done: disable and drain remaining events. */
			if (done && !draining) {
				perf_evlist__disable(evlist);

	if (trace->nr_events == before) {
		int timeout = done ? 100 : -1;

		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)

	thread__zput(trace->current);

	perf_evlist__disable(evlist);

		trace__fprintf_thread_summary(trace, trace->output);

	if (trace->show_tool_stats) {
		fprintf(trace->output, "Stats:\n "
				       " vfs_getname : %" PRIu64 "\n"
				       " proc_getname: %" PRIu64 "\n",
			trace->stats.vfs_getname,
			trace->stats.proc_getname);

	/* --- teardown / error reporting -------------------------------- */
	perf_evlist__delete(evlist);
	trace->evlist = NULL;
	trace->live = false;

	char errbuf[BUFSIZ];

out_error_sched_stat_runtime:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");

out_error_raw_syscalls:
	tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");

	perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));

	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));

	fprintf(trace->output, "%s\n", errbuf);
	goto out_delete_evlist;

out_error_apply_filters:
	fprintf(trace->output,
		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
		evsel->filter, perf_evsel__name(evsel), errno,
		str_error_r(errno, errbuf, sizeof(errbuf)));
	goto out_delete_evlist;

	fprintf(trace->output, "Not enough memory to run!\n");
	goto out_delete_evlist;

	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
	goto out_delete_evlist;
/*
 * 'perf trace -i perf.data': replay a recorded session through the same
 * handlers used live, resolving sys_enter/sys_exit (with fallback to the
 * pre-raw_syscalls names), vfs_getname and page-fault events.
 */
static int trace__replay(struct trace *trace)
	const struct perf_evsel_str_handler handlers[] = {
		{ "probe:vfs_getname", trace__vfs_getname, },
	struct perf_data_file file = {
		.mode  = PERF_DATA_MODE_READ,
		.force = trace->force,
	struct perf_session *session;
	struct perf_evsel *evsel;

	trace->tool.sample	  = trace__process_sample;
	trace->tool.mmap	  = perf_event__process_mmap;
	trace->tool.mmap2	  = perf_event__process_mmap2;
	trace->tool.comm	  = perf_event__process_comm;
	trace->tool.exit	  = perf_event__process_exit;
	trace->tool.fork	  = perf_event__process_fork;
	trace->tool.attr	  = perf_event__process_attr;
	trace->tool.tracing_data = perf_event__process_tracing_data;
	trace->tool.build_id	  = perf_event__process_build_id;
	trace->tool.namespaces	  = perf_event__process_namespaces;

	trace->tool.ordered_events = true;
	trace->tool.ordering_requires_timestamps = true;

	/* add tid to output */
	trace->multiple_threads = true;

	session = perf_session__new(&file, false, &trace->tool);
	if (session == NULL)

	if (trace->opts.target.pid)
		symbol_conf.pid_list_str = strdup(trace->opts.target.pid);

	if (trace->opts.target.tid)
		symbol_conf.tid_list_str = strdup(trace->opts.target.tid);

	if (symbol__init(&session->header.env) < 0)

	trace->host = &session->machines.host;

	err = perf_session__set_tracepoints_handlers(session, handlers);

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_enter");
	/* older kernels have syscalls tp versus raw_syscalls */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_enter");

	    (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
	     perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
		pr_err("Error during initialize raw_syscalls:sys_enter event\n");

	evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
						     "raw_syscalls:sys_exit");
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "syscalls:sys_exit");
	    (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
	     perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
		pr_err("Error during initialize raw_syscalls:sys_exit event\n");

	/* Route any recorded page-fault software events to trace__pgfault(). */
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
		    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
		     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
			evsel->handler = trace__pgfault;

	err = perf_session__process_events(session);
		pr_err("Failed to process events, error %d", err);

	else if (trace->summary)
		trace__fprintf_thread_summary(trace, trace->output);

	perf_session__delete(session);
/* Print the banner that heads the end-of-run per-thread summary. */
2501 static size_t trace__fprintf_threads_header(FILE *fp)
2505 printed = fprintf(fp, "\n Summary of events:\n\n");
/*
 * Re-sort a thread's per-syscall stats intlist by total time spent
 * (msecs, descending) for the summary table; see rb_resort.h for the
 * DEFINE_RESORT_RB machinery.
 */
2510 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
2511 struct stats *stats;
/* Each source node is an int_node keyed by syscall id, stats hung off ->priv. */
2516 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
2517 struct stats *stats = source->priv;
2519 entry->syscall = source->i;
2520 entry->stats = stats;
/* Total msecs = number of calls * average duration (ns scaled down to ms). */
2521 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
/*
 * Print one thread's per-syscall statistics table (calls, total, min,
 * avg, max, stddev), sorted by total time spent, into fp. Returns the
 * number of characters printed.
 */
2524 static size_t thread__dump_stats(struct thread_trace *ttrace,
2525 struct trace *trace, FILE *fp)
2530 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
2532 if (syscall_stats == NULL)
2535 printed += fprintf(fp, "\n");
2537 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
2538 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
2539 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
2541 resort_rb__for_each_entry(nd, syscall_stats) {
2542 struct stats *stats = syscall_stats_entry->stats;
/* Durations are accumulated in nanoseconds; report them in milliseconds. */
2544 double min = (double)(stats->min) / NSEC_PER_MSEC;
2545 double max = (double)(stats->max) / NSEC_PER_MSEC;
2546 double avg = avg_stats(stats);
2548 u64 n = (u64) stats->n;
/* Stddev as a percentage of the mean, guarding against a zero average. */
2550 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
2551 avg /= NSEC_PER_MSEC;
/* Map the resorted syscall id back to its table entry for the name. */
2553 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
2554 printed += fprintf(fp, " %-15s", sc->name);
2555 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
2556 n, syscall_stats_entry->msecs, min, avg);
2557 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
2561 resort_rb__delete(syscall_stats);
2562 printed += fprintf(fp, "\n\n");
/*
 * Print the summary for one thread: comm, tid, event count and its share
 * of all events, page fault counts, accumulated runtime, then the
 * thread's per-syscall stats table. Returns characters printed.
 */
2567 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2570 struct thread_trace *ttrace = thread__priv(thread);
/* This thread's share of all events seen, in percent. */
2576 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2578 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2579 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2580 printed += fprintf(fp, "%.1f%%", ratio);
/* NOTE(review): guards around the fault/runtime prints are missing from this extract. */
2582 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2584 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2586 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2587 else if (fputc('\n', fp) != EOF)
2590 printed += thread__dump_stats(ttrace, trace, fp);
/* Event count for a thread, tolerating threads with no trace state (NULL priv). */
2595 static unsigned long thread__nr_events(struct thread_trace *ttrace)
2597 return ttrace ? ttrace->nr_events : 0;
/*
 * Re-sort the machine's threads rbtree by per-thread event count so the
 * summary is ordered by how busy each thread was; see rb_resort.h.
 */
2600 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
2601 struct thread *thread;
2604 entry->thread = rb_entry(nd, struct thread, rb_node);
/*
 * Print the end-of-run summary: the header plus one entry per thread,
 * sorted by per-thread event count. Returns characters printed.
 */
2607 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2609 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
2610 size_t printed = trace__fprintf_threads_header(fp);
2613 if (threads == NULL) {
2614 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2618 resort_rb__for_each_entry(nd, threads)
2619 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2621 resort_rb__delete(threads);
/*
 * --duration option parser: only print events lasting longer than this
 * many milliseconds (fractional values accepted).
 */
2626 static int trace__set_duration(const struct option *opt, const char *str,
2627 int unset __maybe_unused)
2629 struct trace *trace = opt->value;
/* NOTE(review): atof() reports no parse errors; malformed input silently yields 0.0. */
2631 trace->duration_filter = atof(str);
/*
 * --filter-pids option parser: build trace->filter_pids from a CSV list
 * of pids, with our own pid implicitly stored at entries[0] so the
 * tracer does not trace itself.
 */
2635 static int trace__set_filter_pids(const struct option *opt, const char *str,
2636 int unset __maybe_unused)
2640 struct trace *trace = opt->value;
2642 * FIXME: introduce a intarray class, plain parse csv and create a
2643 * { int nr, int entries[] } struct...
2645 struct intlist *list = intlist__new(str);
/* +1 leaves room for our own pid in slot 0. */
2650 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2651 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2653 if (trace->filter_pids.entries == NULL)
2656 trace->filter_pids.entries[0] = getpid();
/* Copy the parsed pids after ours, shifted by one. */
2658 for (i = 1; i < trace->filter_pids.nr; ++i)
2659 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2661 intlist__delete(list);
/*
 * Open the -o/--output file for writing, first preserving any existing
 * non-empty file of that name by renaming it to <name>.old.
 * Returns 0 on success, -errno if the fopen() failed.
 */
2667 static int trace__open_output(struct trace *trace, const char *filename)
2671 if (!stat(filename, &st) && st.st_size) {
2672 char oldname[PATH_MAX];
2674 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
/* NOTE(review): a rename() failure is silently ignored here. */
2676 rename(filename, oldname);
2679 trace->output = fopen(filename, "w");
2681 return trace->output == NULL ? -errno : 0;
/*
 * -F/--pf option parser: accumulate which page fault kinds to trace
 * ("all", "maj" or "min") into the TRACE_PFMAJ/TRACE_PFMIN bitmask.
 */
2684 static int parse_pagefaults(const struct option *opt, const char *str,
2685 int unset __maybe_unused)
2687 int *trace_pgfaults = opt->value;
2689 if (strcmp(str, "all") == 0)
2690 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2691 else if (strcmp(str, "maj") == 0)
2692 *trace_pgfaults |= TRACE_PFMAJ;
2693 else if (strcmp(str, "min") == 0)
2694 *trace_pgfaults |= TRACE_PFMIN;
/* Install the same sample handler on every evsel in the list. */
2701 static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
2703 struct perf_evsel *evsel;
2705 evlist__for_each_entry(evlist, evsel)
2706 evsel->handler = handler;
2710 * XXX: Hackish, just splitting the combined -e+--event (syscalls
2711 * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
2712 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
2714 * It'd be better to introduce a parse_options() variant that would return a
2715 * list with the terms it didn't match to an event...
/*
 * -e/--event/--expr option parser: walk the comma separated terms,
 * sorting each into one of two sublists. lists[1] collects syscall
 * names (and strace group files found under STRACE_GROUPS_DIR), which
 * become trace->ev_qualifier; lists[0] collects everything else, which
 * is handed to the stock parse_events_option().
 */
2717 static int trace__parse_events_option(const struct option *opt, const char *str,
2718 int unset __maybe_unused)
2720 struct trace *trace = (struct trace *)opt->value;
2721 const char *s = str;
2722 char *sep = NULL, *lists[2] = { NULL, NULL, };
2723 int len = strlen(str), err = -1, list;
2724 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
2725 char group_name[PATH_MAX];
2727 if (strace_groups_dir == NULL)
/* A leading '!' negates the whole syscall qualifier. */
2732 trace->not_ev_qualifier = true;
/* Split on commas, term by term. */
2736 if ((sep = strchr(s, ',')) != NULL)
/* A known syscall name goes into the qualifier list... */
2740 if (syscalltbl__id(trace->sctbl, s) >= 0) {
/* ...as does a readable strace group file under strace_groups_dir. */
2743 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
2744 if (access(group_name, R_OK) == 0)
/* Append to the chosen sublist, allocating it on first use. */
2749 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
2751 lists[list] = malloc(len);
2752 if (lists[list] == NULL)
2754 strcpy(lists[list], s);
/* Syscall names/groups become the ev_qualifier strlist. */
2764 if (lists[1] != NULL) {
2765 struct strlist_config slist_config = {
2766 .dirname = strace_groups_dir,
2769 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
2770 if (trace->ev_qualifier == NULL) {
2771 fputs("Not enough memory to parse event qualifier", trace->output);
2775 if (trace__validate_ev_qualifier(trace))
/* Everything else goes through the regular -e event parser unchanged. */
2782 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
2783 "event selector. use 'perf list' to list available events",
2784 parse_events_option);
2785 err = parse_events_option(&o, lists[0], 0);
2794 int cmd_trace(int argc, const char **argv)
2796 const char *trace_usage[] = {
2797 "perf trace [<options>] [<command>]",
2798 "perf trace [<options>] -- <command> [<options>]",
2799 "perf trace record [<options>] [<command>]",
2800 "perf trace record [<options>] -- <command> [<options>]",
2803 struct trace trace = {
2812 .user_freq = UINT_MAX,
2813 .user_interval = ULLONG_MAX,
2814 .no_buffering = true,
2815 .mmap_pages = UINT_MAX,
2816 .proc_map_timeout = 500,
2820 .trace_syscalls = true,
2821 .kernel_syscallchains = false,
2822 .max_stack = UINT_MAX,
2824 const char *output_name = NULL;
2825 const struct option trace_options[] = {
2826 OPT_CALLBACK('e', "event", &trace, "event",
2827 "event/syscall selector. use 'perf list' to list available events",
2828 trace__parse_events_option),
2829 OPT_BOOLEAN(0, "comm", &trace.show_comm,
2830 "show the thread COMM next to its id"),
2831 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
2832 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
2833 trace__parse_events_option),
2834 OPT_STRING('o', "output", &output_name, "file", "output file name"),
2835 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
2836 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
2837 "trace events on existing process id"),
2838 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
2839 "trace events on existing thread id"),
2840 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
2841 "pids to filter (by the kernel)", trace__set_filter_pids),
2842 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
2843 "system-wide collection from all CPUs"),
2844 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
2845 "list of cpus to monitor"),
2846 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
2847 "child tasks do not inherit counters"),
2848 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
2849 "number of mmap data pages",
2850 perf_evlist__parse_mmap_pages),
2851 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
2853 OPT_CALLBACK(0, "duration", &trace, "float",
2854 "show only events with duration > N.M ms",
2855 trace__set_duration),
2856 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
2857 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
2858 OPT_BOOLEAN('T', "time", &trace.full_time,
2859 "Show full timestamp, not time relative to first start"),
2860 OPT_BOOLEAN('s', "summary", &trace.summary_only,
2861 "Show only syscall summary with statistics"),
2862 OPT_BOOLEAN('S', "with-summary", &trace.summary,
2863 "Show all syscalls and summary with statistics"),
2864 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
2865 "Trace pagefaults", parse_pagefaults, "maj"),
2866 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
2867 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
2868 OPT_CALLBACK(0, "call-graph", &trace.opts,
2869 "record_mode[,record_size]", record_callchain_help,
2870 &record_parse_callchain_opt),
2871 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
2872 "Show the kernel callchains on the syscall exit path"),
2873 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
2874 "Set the minimum stack depth when parsing the callchain, "
2875 "anything below the specified depth will be ignored."),
2876 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
2877 "Set the maximum stack depth when parsing the callchain, "
2878 "anything beyond the specified depth will be ignored. "
2879 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
2880 OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
2881 "per thread proc mmap processing timeout in ms"),
2882 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
2883 "ms to wait before starting measurement after program "
2887 bool __maybe_unused max_stack_user_set = true;
2888 bool mmap_pages_user_set = true;
2889 const char * const trace_subcommands[] = { "record", NULL };
2893 signal(SIGSEGV, sighandler_dump_stack);
2894 signal(SIGFPE, sighandler_dump_stack);
2896 trace.evlist = perf_evlist__new();
2897 trace.sctbl = syscalltbl__new();
2899 if (trace.evlist == NULL || trace.sctbl == NULL) {
2900 pr_err("Not enough memory to run!\n");
2905 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
2906 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2908 err = bpf__setup_stdout(trace.evlist);
2910 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
2911 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
2917 if (trace.trace_pgfaults) {
2918 trace.opts.sample_address = true;
2919 trace.opts.sample_time = true;
2922 if (trace.opts.mmap_pages == UINT_MAX)
2923 mmap_pages_user_set = false;
2925 if (trace.max_stack == UINT_MAX) {
2926 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
2927 max_stack_user_set = false;
2930 #ifdef HAVE_DWARF_UNWIND_SUPPORT
2931 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
2932 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
2935 if (callchain_param.enabled) {
2936 if (!mmap_pages_user_set && geteuid() == 0)
2937 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
2939 symbol_conf.use_callchain = true;
2942 if (trace.evlist->nr_entries > 0)
2943 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
2945 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
2946 return trace__record(&trace, argc-1, &argv[1]);
2948 /* summary_only implies summary option, but don't overwrite summary if set */
2949 if (trace.summary_only)
2950 trace.summary = trace.summary_only;
2952 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
2953 trace.evlist->nr_entries == 0 /* Was --events used? */) {
2954 pr_err("Please specify something to trace.\n");
2958 if (!trace.trace_syscalls && trace.ev_qualifier) {
2959 pr_err("The -e option can't be used with --no-syscalls.\n");
2963 if (output_name != NULL) {
2964 err = trace__open_output(&trace, output_name);
2966 perror("failed to create output file");
2971 trace.open_id = syscalltbl__id(trace.sctbl, "open");
2973 err = target__validate(&trace.opts.target);
2975 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2976 fprintf(trace.output, "%s", bf);
2980 err = target__parse_uid(&trace.opts.target);
2982 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2983 fprintf(trace.output, "%s", bf);
2987 if (!argc && target__none(&trace.opts.target))
2988 trace.opts.target.system_wide = true;
2991 err = trace__replay(&trace);
2993 err = trace__run(&trace, argc, argv);
2996 if (output_name != NULL)
2997 fclose(trace.output);