4 * Builtin 'trace' command:
6 * Display a continuously updated trace of any workload, CPU, specific PID,
7 * system wide, etc. Default format is loosely strace like, but any other
8 * event may be specified using --event.
10 * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
12 * Initially based on the 'trace' prototype by Thomas Gleixner:
14 * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
16 * Released under the GPL v2. (and only v2, not any later version)
19 #include <traceevent/event-parse.h>
20 #include <api/fs/tracing_path.h>
22 #include "util/color.h"
23 #include "util/debug.h"
24 #include "util/evlist.h"
25 #include <subcmd/exec-cmd.h>
26 #include "util/machine.h"
27 #include "util/session.h"
28 #include "util/thread.h"
29 #include <subcmd/parse-options.h>
30 #include "util/strlist.h"
31 #include "util/intlist.h"
32 #include "util/thread_map.h"
33 #include "util/stat.h"
34 #include "trace-event.h"
35 #include "util/parse-events.h"
36 #include "util/bpf-loader.h"
37 #include "callchain.h"
38 #include "syscalltbl.h"
39 #include "rb_resort.h"
41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
44 #include <linux/err.h>
45 #include <linux/filter.h>
46 #include <linux/audit.h>
47 #include <linux/random.h>
48 #include <linux/stringify.h>
49 #include <linux/time64.h>
52 # define O_CLOEXEC 02000000
56 struct perf_tool tool;
57 struct syscalltbl *sctbl;
60 struct syscall *table;
62 struct perf_evsel *sys_enter,
66 struct record_opts opts;
67 struct perf_evlist *evlist;
69 struct thread *current;
72 unsigned long nr_events;
73 struct strlist *ev_qualifier;
82 double duration_filter;
88 unsigned int max_stack;
89 unsigned int min_stack;
90 bool not_ev_qualifier;
94 bool multiple_threads;
100 bool kernel_syscallchains;
110 u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
111 void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
115 #define TP_UINT_FIELD(bits) \
116 static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
119 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
128 #define TP_UINT_FIELD__SWAPPED(bits) \
129 static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
132 memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
133 return bswap_##bits(value);\
136 TP_UINT_FIELD__SWAPPED(16);
137 TP_UINT_FIELD__SWAPPED(32);
138 TP_UINT_FIELD__SWAPPED(64);
/*
 * Bind the fixed-width integer accessor matching the tracepoint field's
 * size to 'field', using the byte-swapping variant when the recorded
 * data's endianness differs from the host's.
 * NOTE(review): this listing is elided — the braces, case labels and
 * default branch of the switch are not visible in this chunk.
 */
140 static int tp_field__init_uint(struct tp_field *field,
141 struct format_field *format_field,
144 field->offset = format_field->offset;
146 switch (format_field->size) {
148 field->integer = tp_field__u8; /* single byte: never needs swapping */
151 field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
154 field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
157 field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
/* Pointer accessor: the field's bytes live inline in the raw sample data. */
166 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
168 return sample->raw_data + field->offset;
/* Wire 'field' up to return a pointer into the sample's raw payload. */
171 static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
173 field->offset = format_field->offset;
174 field->pointer = tp_field__ptr;
181 struct tp_field args, ret;
/*
 * Look up tracepoint field 'name' in the evsel's format description and
 * bind an integer accessor for it; fails when the field does not exist.
 */
185 static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
186 struct tp_field *field,
189 struct format_field *format_field = perf_evsel__field(evsel, name);
191 if (format_field == NULL)
194 return tp_field__init_uint(field, format_field, evsel->needs_swap);
/* Convenience wrapper: the syscall_tp member name doubles as the tracepoint field name. */
197 #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
198 ({ struct syscall_tp *sc = evsel->priv;\
199 perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
/*
 * Same as the uint variant, but binds a raw-pointer accessor for fields
 * whose bytes are consumed in place from the sample.
 */
201 static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
202 struct tp_field *field,
205 struct format_field *format_field = perf_evsel__field(evsel, name);
207 if (format_field == NULL)
210 return tp_field__init_ptr(field, format_field);
/* Convenience wrapper mirroring perf_evsel__init_sc_tp_uint_field(). */
213 #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
214 ({ struct syscall_tp *sc = evsel->priv;\
215 perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
/* Free the evsel's private syscall_tp state along with the evsel itself. */
217 static void perf_evsel__delete_priv(struct perf_evsel *evsel)
220 perf_evsel__delete(evsel);
/*
 * Allocate per-evsel syscall_tp state, bind the 'id' field accessor and
 * install the sample handler.
 * NOTE(review): the error/return paths are elided from this listing.
 */
223 static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
225 evsel->priv = malloc(sizeof(struct syscall_tp));
226 if (evsel->priv != NULL) {
227 if (perf_evsel__init_sc_tp_uint_field(evsel, id))
230 evsel->handler = handler;
/*
 * Create a raw_syscalls:sys_{enter,exit} tracepoint evsel, falling back
 * to the legacy "syscalls" subsystem on older kernels, then initialize
 * its syscall_tp state; the evsel is torn down on failure.
 */
241 static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
243 struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
245 /* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
247 evsel = perf_evsel__newtp("syscalls", direction);
252 if (perf_evsel__init_syscall_tp(evsel, handler))
258 perf_evsel__delete_priv(evsel);
/* Fetch a bound integer field from a sample via the evsel's syscall_tp. */
262 #define perf_evsel__sc_tp_uint(evsel, name, sample) \
263 ({ struct syscall_tp *fields = evsel->priv; \
264 fields->name.integer(&fields->name, sample); })
/* Fetch a bound pointer field (into the raw sample data) the same way. */
266 #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
267 ({ struct syscall_tp *fields = evsel->priv; \
268 fields->name.pointer(&fields->name, sample); })
272 struct thread *thread;
282 const char **entries;
285 #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
286 .nr_entries = ARRAY_SIZE(array), \
290 #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
292 .nr_entries = ARRAY_SIZE(array), \
/*
 * Print arg->val as a symbolic name taken from the strarray in arg->parm,
 * falling back to 'intfmt' for values outside the table.
 */
296 static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
298 struct syscall_arg *arg)
300 struct strarray *sa = arg->parm;
301 int idx = arg->val - sa->offset; /* some tables don't start at value 0 */
303 if (idx < 0 || idx >= sa->nr_entries)
304 return scnprintf(bf, size, intfmt, arg->val);
306 return scnprintf(bf, size, "%s", sa->entries[idx]);
/* Decimal-fallback variant: the common case. */
309 static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
310 struct syscall_arg *arg)
312 return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
315 #define SCA_STRARRAY syscall_arg__scnprintf_strarray
317 #if defined(__i386__) || defined(__x86_64__)
319 * FIXME: Make this available to all arches as soon as the ioctl beautifier
320 * gets rewritten to support all arches.
322 static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
323 struct syscall_arg *arg)
325 return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
328 #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
329 #endif /* defined(__i386__) || defined(__x86_64__) */
331 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
332 struct syscall_arg *arg);
334 #define SCA_FD syscall_arg__scnprintf_fd
337 #define AT_FDCWD -100
340 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
341 struct syscall_arg *arg)
346 return scnprintf(bf, size, "CWD");
348 return syscall_arg__scnprintf_fd(bf, size, arg);
351 #define SCA_FDAT syscall_arg__scnprintf_fd_at
353 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
354 struct syscall_arg *arg);
356 #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
/* Print an argument as a hex constant (addresses, opaque values). */
358 static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
359 struct syscall_arg *arg)
361 return scnprintf(bf, size, "%#lx", arg->val);
364 #define SCA_HEX syscall_arg__scnprintf_hex
/* Print an argument as a signed decimal integer. */
366 static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
367 struct syscall_arg *arg)
369 return scnprintf(bf, size, "%d", arg->val);
372 #define SCA_INT syscall_arg__scnprintf_int
374 static const char *bpf_cmd[] = {
375 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
376 "MAP_GET_NEXT_KEY", "PROG_LOAD",
378 static DEFINE_STRARRAY(bpf_cmd);
380 static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
381 static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
383 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
384 static DEFINE_STRARRAY(itimers);
386 static const char *keyctl_options[] = {
387 "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
388 "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
389 "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
390 "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
391 "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
393 static DEFINE_STRARRAY(keyctl_options);
395 static const char *whences[] = { "SET", "CUR", "END",
403 static DEFINE_STRARRAY(whences);
405 static const char *fcntl_cmds[] = {
406 "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
407 "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
408 "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
411 static DEFINE_STRARRAY(fcntl_cmds);
413 static const char *rlimit_resources[] = {
414 "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
415 "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
418 static DEFINE_STRARRAY(rlimit_resources);
420 static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
421 static DEFINE_STRARRAY(sighow);
423 static const char *clockid[] = {
424 "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
425 "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
426 "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
428 static DEFINE_STRARRAY(clockid);
430 static const char *socket_families[] = {
431 "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
432 "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
433 "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
434 "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
435 "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
436 "ALG", "NFC", "VSOCK",
438 static DEFINE_STRARRAY(socket_families);
440 static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
441 struct syscall_arg *arg)
446 if (mode == F_OK) /* 0 */
447 return scnprintf(bf, size, "F");
449 if (mode & n##_OK) { \
450 printed += scnprintf(bf + printed, size - printed, "%s", #n); \
460 printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
465 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
467 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
468 struct syscall_arg *arg);
470 #define SCA_FILENAME syscall_arg__scnprintf_filename
472 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
473 struct syscall_arg *arg)
475 int printed = 0, flags = arg->val;
478 if (flags & O_##n) { \
479 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
488 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
493 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
495 #if defined(__i386__) || defined(__x86_64__)
497 * FIXME: Make this available to all arches.
499 #define TCGETS 0x5401
501 static const char *tioctls[] = {
502 "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
503 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
504 "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
505 "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
506 "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
507 "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
508 "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
509 "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
510 "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
511 "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
512 "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
513 [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
514 "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
515 "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
516 "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
519 static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
520 #endif /* defined(__i386__) || defined(__x86_64__) */
522 #ifndef GRND_NONBLOCK
523 #define GRND_NONBLOCK 0x0001
526 #define GRND_RANDOM 0x0002
529 static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
530 struct syscall_arg *arg)
532 int printed = 0, flags = arg->val;
535 if (flags & GRND_##n) { \
536 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
537 flags &= ~GRND_##n; \
545 printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
550 #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
552 #define STRARRAY(arg, name, array) \
553 .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
554 .arg_parm = { [arg] = &strarray__##array, }
556 #include "trace/beauty/eventfd.c"
557 #include "trace/beauty/flock.c"
558 #include "trace/beauty/futex_op.c"
559 #include "trace/beauty/mmap.c"
560 #include "trace/beauty/mode_t.c"
561 #include "trace/beauty/msg_flags.c"
562 #include "trace/beauty/open_flags.c"
563 #include "trace/beauty/perf_event_open.c"
564 #include "trace/beauty/pid.c"
565 #include "trace/beauty/sched_policy.c"
566 #include "trace/beauty/seccomp.c"
567 #include "trace/beauty/signum.c"
568 #include "trace/beauty/socket_type.c"
569 #include "trace/beauty/waitid_options.c"
571 static struct syscall_fmt {
574 size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
581 { .name = "access", .errmsg = true,
582 .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
583 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
584 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
585 { .name = "brk", .hexret = true,
586 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
587 { .name = "chdir", .errmsg = true, },
588 { .name = "chmod", .errmsg = true, },
589 { .name = "chroot", .errmsg = true, },
590 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
591 { .name = "clone", .errpid = true, },
592 { .name = "close", .errmsg = true,
593 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
594 { .name = "connect", .errmsg = true, },
595 { .name = "creat", .errmsg = true, },
596 { .name = "dup", .errmsg = true, },
597 { .name = "dup2", .errmsg = true, },
598 { .name = "dup3", .errmsg = true, },
599 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
600 { .name = "eventfd2", .errmsg = true,
601 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
602 { .name = "faccessat", .errmsg = true, },
603 { .name = "fadvise64", .errmsg = true, },
604 { .name = "fallocate", .errmsg = true, },
605 { .name = "fchdir", .errmsg = true, },
606 { .name = "fchmod", .errmsg = true, },
607 { .name = "fchmodat", .errmsg = true,
608 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
609 { .name = "fchown", .errmsg = true, },
610 { .name = "fchownat", .errmsg = true,
611 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
612 { .name = "fcntl", .errmsg = true,
613 .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
614 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
615 { .name = "fdatasync", .errmsg = true, },
616 { .name = "flock", .errmsg = true,
617 .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
618 { .name = "fsetxattr", .errmsg = true, },
619 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
620 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
621 { .name = "fstatfs", .errmsg = true, },
622 { .name = "fsync", .errmsg = true, },
623 { .name = "ftruncate", .errmsg = true, },
624 { .name = "futex", .errmsg = true,
625 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
626 { .name = "futimesat", .errmsg = true,
627 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
628 { .name = "getdents", .errmsg = true, },
629 { .name = "getdents64", .errmsg = true, },
630 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
631 { .name = "getpid", .errpid = true, },
632 { .name = "getpgid", .errpid = true, },
633 { .name = "getppid", .errpid = true, },
634 { .name = "getrandom", .errmsg = true,
635 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
636 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
637 { .name = "getxattr", .errmsg = true, },
638 { .name = "inotify_add_watch", .errmsg = true, },
639 { .name = "ioctl", .errmsg = true,
641 #if defined(__i386__) || defined(__x86_64__)
643 * FIXME: Make this available to all arches.
645 [1] = SCA_STRHEXARRAY, /* cmd */
646 [2] = SCA_HEX, /* arg */ },
647 .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
649 [2] = SCA_HEX, /* arg */ }, },
651 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
652 { .name = "kill", .errmsg = true,
653 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
654 { .name = "lchown", .errmsg = true, },
655 { .name = "lgetxattr", .errmsg = true, },
656 { .name = "linkat", .errmsg = true,
657 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
658 { .name = "listxattr", .errmsg = true, },
659 { .name = "llistxattr", .errmsg = true, },
660 { .name = "lremovexattr", .errmsg = true, },
661 { .name = "lseek", .errmsg = true,
662 .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
663 .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
664 { .name = "lsetxattr", .errmsg = true, },
665 { .name = "lstat", .errmsg = true, .alias = "newlstat", },
666 { .name = "lsxattr", .errmsg = true, },
667 { .name = "madvise", .errmsg = true,
668 .arg_scnprintf = { [0] = SCA_HEX, /* start */
669 [2] = SCA_MADV_BHV, /* behavior */ }, },
670 { .name = "mkdir", .errmsg = true, },
671 { .name = "mkdirat", .errmsg = true,
672 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
673 { .name = "mknod", .errmsg = true, },
674 { .name = "mknodat", .errmsg = true,
675 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
676 { .name = "mlock", .errmsg = true,
677 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
678 { .name = "mlockall", .errmsg = true,
679 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
680 { .name = "mmap", .hexret = true,
681 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
682 [2] = SCA_MMAP_PROT, /* prot */
683 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
684 { .name = "mprotect", .errmsg = true,
685 .arg_scnprintf = { [0] = SCA_HEX, /* start */
686 [2] = SCA_MMAP_PROT, /* prot */ }, },
687 { .name = "mq_unlink", .errmsg = true,
688 .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
689 { .name = "mremap", .hexret = true,
690 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
691 [3] = SCA_MREMAP_FLAGS, /* flags */
692 [4] = SCA_HEX, /* new_addr */ }, },
693 { .name = "munlock", .errmsg = true,
694 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
695 { .name = "munmap", .errmsg = true,
696 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
697 { .name = "name_to_handle_at", .errmsg = true,
698 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
699 { .name = "newfstatat", .errmsg = true,
700 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
701 { .name = "open", .errmsg = true,
702 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
703 { .name = "open_by_handle_at", .errmsg = true,
704 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
705 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
706 { .name = "openat", .errmsg = true,
707 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
708 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
709 { .name = "perf_event_open", .errmsg = true,
710 .arg_scnprintf = { [2] = SCA_INT, /* cpu */
711 [3] = SCA_FD, /* group_fd */
712 [4] = SCA_PERF_FLAGS, /* flags */ }, },
713 { .name = "pipe2", .errmsg = true,
714 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
715 { .name = "poll", .errmsg = true, .timeout = true, },
716 { .name = "ppoll", .errmsg = true, .timeout = true, },
717 { .name = "pread", .errmsg = true, .alias = "pread64", },
718 { .name = "preadv", .errmsg = true, .alias = "pread", },
719 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
720 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
721 { .name = "pwritev", .errmsg = true, },
722 { .name = "read", .errmsg = true, },
723 { .name = "readlink", .errmsg = true, },
724 { .name = "readlinkat", .errmsg = true,
725 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
726 { .name = "readv", .errmsg = true, },
727 { .name = "recvfrom", .errmsg = true,
728 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
729 { .name = "recvmmsg", .errmsg = true,
730 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
731 { .name = "recvmsg", .errmsg = true,
732 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
733 { .name = "removexattr", .errmsg = true, },
734 { .name = "renameat", .errmsg = true,
735 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
736 { .name = "rmdir", .errmsg = true, },
737 { .name = "rt_sigaction", .errmsg = true,
738 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
739 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
740 { .name = "rt_sigqueueinfo", .errmsg = true,
741 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
742 { .name = "rt_tgsigqueueinfo", .errmsg = true,
743 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
744 { .name = "sched_getattr", .errmsg = true, },
745 { .name = "sched_setattr", .errmsg = true, },
746 { .name = "sched_setscheduler", .errmsg = true,
747 .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
748 { .name = "seccomp", .errmsg = true,
749 .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
750 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
751 { .name = "select", .errmsg = true, .timeout = true, },
752 { .name = "sendmmsg", .errmsg = true,
753 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
754 { .name = "sendmsg", .errmsg = true,
755 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
756 { .name = "sendto", .errmsg = true,
757 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
758 { .name = "set_tid_address", .errpid = true, },
759 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
760 { .name = "setpgid", .errmsg = true, },
761 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
762 { .name = "setxattr", .errmsg = true, },
763 { .name = "shutdown", .errmsg = true, },
764 { .name = "socket", .errmsg = true,
765 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
766 [1] = SCA_SK_TYPE, /* type */ },
767 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
768 { .name = "socketpair", .errmsg = true,
769 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
770 [1] = SCA_SK_TYPE, /* type */ },
771 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
772 { .name = "stat", .errmsg = true, .alias = "newstat", },
773 { .name = "statfs", .errmsg = true, },
774 { .name = "swapoff", .errmsg = true,
775 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
776 { .name = "swapon", .errmsg = true,
777 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
778 { .name = "symlinkat", .errmsg = true,
779 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
780 { .name = "tgkill", .errmsg = true,
781 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
782 { .name = "tkill", .errmsg = true,
783 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
784 { .name = "truncate", .errmsg = true, },
785 { .name = "uname", .errmsg = true, .alias = "newuname", },
786 { .name = "unlinkat", .errmsg = true,
787 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
788 { .name = "utime", .errmsg = true, },
789 { .name = "utimensat", .errmsg = true,
790 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
791 { .name = "utimes", .errmsg = true, },
792 { .name = "vmsplice", .errmsg = true, },
793 { .name = "wait4", .errpid = true,
794 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
795 { .name = "waitid", .errpid = true,
796 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
797 { .name = "write", .errmsg = true, },
798 { .name = "writev", .errmsg = true, },
/* bsearch() comparator: the key is a plain syscall-name string. */
801 static int syscall_fmt__cmp(const void *name, const void *fmtp)
803 const struct syscall_fmt *fmt = fmtp;
804 return strcmp(name, fmt->name);
/*
 * Binary-search the syscall_fmts table; the table must therefore remain
 * sorted by ->name for lookups to work.
 */
807 static struct syscall_fmt *syscall_fmt__find(const char *name)
809 const int nmemb = ARRAY_SIZE(syscall_fmts);
810 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
814 struct event_format *tp_format;
816 struct format_field *args;
819 struct syscall_fmt *fmt;
820 size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
/*
 * Print a syscall duration in ms, colour-coded by magnitude: red for the
 * slowest calls, yellow for middling ones, normal colour otherwise.
 * NOTE(review): the threshold test for the red branch is elided from this
 * listing.
 */
824 static size_t fprintf_duration(unsigned long t, FILE *fp)
826 double duration = (double)t / NSEC_PER_MSEC;
827 size_t printed = fprintf(fp, "(");
830 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
831 else if (duration >= 0.01)
832 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
834 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
835 return printed + fprintf(fp, "): ");
839 * filename.ptr: The filename char pointer that will be vfs_getname'd
840 * filename.entry_str_pos: Where to insert the string translated from
841 * filename.ptr by the vfs_getname tracepoint/kprobe.
843 struct thread_trace {
846 unsigned long nr_events;
847 unsigned long pfmaj, pfmin;
852 short int entry_str_pos;
854 unsigned int namelen;
862 struct intlist *syscall_stats;
/* Allocate zeroed per-thread trace state; the fd path table starts empty. */
865 static struct thread_trace *thread_trace__new(void)
867 struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace))
870 ttrace->paths.max = -1; /* no fds resolved yet */
872 ttrace->syscall_stats = intlist__new(NULL);
/*
 * Return (creating on first use) the thread's per-thread trace state;
 * on allocation failure warn that samples are being dropped.
 */
877 static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
879 struct thread_trace *ttrace;
884 if (thread__priv(thread) == NULL)
885 thread__set_priv(thread, thread_trace__new());
887 if (thread__priv(thread) == NULL)
890 ttrace = thread__priv(thread);
895 color_fprintf(fp, PERF_COLOR_RED,
896 "WARNING: not enough memory, dropping samples!\n");
/* Page-fault tracing selection bits (major / minor faults). */
900 #define TRACE_PFMAJ (1 << 0)
901 #define TRACE_PFMIN (1 << 1)
/* Size of the per-thread buffer used to assemble the printed entry line. */
903 static const size_t trace__entry_str_size = 2048;
/*
 * Record 'pathname' (strdup'ed) for 'fd' in the thread's path table,
 * growing the table on demand and zero-filling the newly exposed tail so
 * unresolved fds read as NULL.
 */
905 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
907 struct thread_trace *ttrace = thread__priv(thread);
909 if (fd > ttrace->paths.max) {
910 char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
915 if (ttrace->paths.max != -1) {
916 memset(npath + ttrace->paths.max + 1, 0,
917 (fd - ttrace->paths.max) * sizeof(char *));
919 memset(npath, 0, (fd + 1) * sizeof(char *));
922 ttrace->paths.table = npath;
923 ttrace->paths.max = fd;
926 ttrace->paths.table[fd] = strdup(pathname);
928 return ttrace->paths.table[fd] != NULL ? 0 : -1;
/*
 * Resolve an fd to its path by reading the /proc/<pid>/fd/<fd> symlink
 * (or the per-task variant when tid != pid) and cache the result in the
 * thread's path table.
 */
931 static int thread__read_fd_path(struct thread *thread, int fd)
933 char linkname[PATH_MAX], pathname[PATH_MAX];
937 if (thread->pid_ == thread->tid) {
938 scnprintf(linkname, sizeof(linkname),
939 "/proc/%d/fd/%d", thread->pid_, fd);
941 scnprintf(linkname, sizeof(linkname),
942 "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
945 if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
948 ret = readlink(linkname, pathname, sizeof(pathname));
950 if (ret < 0 || ret > st.st_size)
953 pathname[ret] = '\0'; /* readlink() does not NUL-terminate */
954 return trace__set_fd_pathname(thread, fd, pathname);
/*
 * Return the cached path for 'fd', resolving it from /proc on first
 * access and counting the lookup in trace->stats.proc_getname.
 */
957 static const char *thread__fd_path(struct thread *thread, int fd,
960 struct thread_trace *ttrace = thread__priv(thread);
968 if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
971 ++trace->stats.proc_getname;
972 if (thread__read_fd_path(thread, fd))
976 return ttrace->paths.table[fd];
/* Print an fd argument as "N<path>" when the path can be resolved. */
979 static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
980 struct syscall_arg *arg)
983 size_t printed = scnprintf(bf, size, "%d", fd);
984 const char *path = thread__fd_path(arg->thread, fd, arg->trace);
987 printed += scnprintf(bf + printed, size - printed, "<%s>", path);
/*
 * Like the plain fd printer, but also drops the cached path entry, since
 * close() invalidates the fd -> path mapping.
 */
992 static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
993 struct syscall_arg *arg)
996 size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
997 struct thread_trace *ttrace = thread__priv(arg->thread);
999 if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
1000 zfree(&ttrace->paths.table[fd]);
/*
 * Remember where in the entry string the filename pointer was printed so
 * a later vfs_getname event can splice the resolved name in at that spot.
 */
1005 static void thread__set_filename_pos(struct thread *thread, const char *bf,
1008 struct thread_trace *ttrace = thread__priv(thread);
1010 ttrace->filename.ptr = ptr;
1011 ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
/*
 * Print a filename argument: just the raw pointer when vfs_getname is
 * unavailable, otherwise record the position for later substitution.
 * NOTE(review): "%#x" with an unsigned long looks like a format-specifier
 * mismatch ("%#lx" expected) — verify against upstream.
 */
1014 static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
1015 struct syscall_arg *arg)
1017 unsigned long ptr = arg->val;
1019 if (!arg->trace->vfs_getname)
1020 return scnprintf(bf, size, "%#x", ptr);
1022 thread__set_filename_pos(arg->thread, bf, ptr);
/* True when the syscall took less than the user's --duration filter (ms). */
1026 static bool trace__filter_duration(struct trace *trace, double t)
1028 return t < (trace->duration_filter * NSEC_PER_MSEC);
/* Print the sample timestamp in ms relative to the start of the trace. */
1031 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
1033 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
1035 return fprintf(fp, "%10.3f ", ts);
1038 static bool done = false;
1039 static bool interrupted = false;
/* Signal handler: remember whether it was SIGINT that stopped us. */
1041 static void sig_handler(int sig)
1044 interrupted = sig == SIGINT;
/*
 * Print the common line prefix: timestamp, duration and — when tracing
 * more than one thread — the comm/tid of the thread making the syscall.
 */
1047 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
1048 u64 duration, u64 tstamp, FILE *fp)
1050 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
1051 printed += fprintf_duration(duration, fp);
1053 if (trace->multiple_threads) {
1054 if (trace->show_comm)
1055 printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
1056 printed += fprintf(fp, "%d ", thread->tid);
/*
 * Route side-band events to the machine state machinery, printing a red
 * warning for LOST records so the user knows the trace has holes.
 */
1062 static int trace__process_event(struct trace *trace, struct machine *machine,
1063 union perf_event *event, struct perf_sample *sample)
1067 switch (event->header.type) {
1068 case PERF_RECORD_LOST:
1069 color_fprintf(trace->output, PERF_COLOR_RED,
1070 "LOST %" PRIu64 " events!\n", event->lost.lost);
1071 ret = machine__process_lost_event(machine, event, sample);
1074 ret = machine__process_event(machine, event, sample);
/* perf_tool callback adapter around trace__process_event(). */
1081 static int trace__tool_process(struct perf_tool *tool,
1082 union perf_event *event,
1083 struct perf_sample *sample,
1084 struct machine *machine)
1086 struct trace *trace = container_of(tool, struct trace, tool);
1087 return trace__process_event(trace, machine, event, sample);
/*
 * Kernel address resolver that warns (once per machine) when
 * kptr_restrict hides kernel symbols, then defers to the stock resolver.
 */
1090 static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1092 struct machine *machine = vmachine;
1094 if (machine->kptr_restrict_warned)
1097 if (symbol_conf.kptr_restrict) {
1098 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1099 "Check /proc/sys/kernel/kptr_restrict.\n\n"
1100 "Kernel samples will not be resolved.\n");
1101 machine->kptr_restrict_warned = true;
1105 return machine__resolve_kernel_addr(vmachine, addrp, modp);
/*
 * Set up symbol resolution and a host machine object, then synthesize
 * already-running threads so their samples can be attributed from the
 * start of the session.
 */
1108 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1110 int err = symbol__init(NULL);
1115 trace->host = machine__new_host();
1116 if (trace->host == NULL)
1119 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
1122 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
1123 evlist->threads, trace__tool_process, false,
1124 trace->opts.proc_map_timeout);
/*
 * Choose a pretty-printer for each syscall argument: explicit per-syscall
 * formatters from the fmt table win; otherwise heuristics keyed on the
 * field's C type and name pick filename, hex-pointer, pid_t, umode_t or
 * fd printers.
 */
1131 static int syscall__set_arg_fmts(struct syscall *sc)
1133 struct format_field *field;
1136 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
1137 if (sc->arg_scnprintf == NULL)
1141 sc->arg_parm = sc->fmt->arg_parm;
1143 for (field = sc->args; field; field = field->next) {
1144 if (sc->fmt && sc->fmt->arg_scnprintf[idx])
1145 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
1146 else if (strcmp(field->type, "const char *") == 0 &&
1147 (strcmp(field->name, "filename") == 0 ||
1148 strcmp(field->name, "path") == 0 ||
1149 strcmp(field->name, "pathname") == 0))
1150 sc->arg_scnprintf[idx] = SCA_FILENAME;
1151 else if (field->flags & FIELD_IS_POINTER)
1152 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
1153 else if (strcmp(field->type, "pid_t") == 0)
1154 sc->arg_scnprintf[idx] = SCA_PID;
1155 else if (strcmp(field->type, "umode_t") == 0)
1156 sc->arg_scnprintf[idx] = SCA_MODE_T;
1157 else if ((strcmp(field->type, "int") == 0 ||
1158 strcmp(field->type, "unsigned int") == 0 ||
1159 strcmp(field->type, "long") == 0) &&
1160 (len = strlen(field->name)) >= 2 &&
1161 strcmp(field->name + len - 2, "fd") == 0) {
1163 * /sys/kernel/tracing/events/syscalls/sys_enter*
1164 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1169 sc->arg_scnprintf[idx] = SCA_FD;
/*
 * Lazily populate trace->syscalls.table[id]: grow the table if needed,
 * look up the syscall name in the syscall table, find its matching
 * "syscalls:sys_enter_*" tracepoint format (falling back to the alias
 * from the fmt table) and derive per-argument formatters.
 */
1177 static int trace__read_syscall_info(struct trace *trace, int id)
1181 const char *name = syscalltbl__name(trace->sctbl, id);
/* Grow the table to hold ids [0, id]; realloc keeps existing entries. */
1186 if (id > trace->syscalls.max) {
1187 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
1189 if (nsyscalls == NULL)
/* Zero only the newly appended slots (or everything on first alloc). */
1192 if (trace->syscalls.max != -1) {
1193 memset(nsyscalls + trace->syscalls.max + 1, 0,
1194 (id - trace->syscalls.max) * sizeof(*sc));
1196 memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
1199 trace->syscalls.table = nsyscalls;
1200 trace->syscalls.max = id;
1203 sc = trace->syscalls.table + id;
1206 sc->fmt = syscall_fmt__find(sc->name);
1208 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
1209 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
/* Some syscalls' tracepoints use an alias (e.g. "newstat" vs "stat"). */
1211 if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
1212 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
1213 sc->tp_format = trace_event__tp_format("syscalls", tp_name);
1216 if (IS_ERR(sc->tp_format))
1219 sc->args = sc->tp_format->format.fields;
1220 sc->nr_args = sc->tp_format->format.nr_fields;
1222 * We need to check and discard the first variable '__syscall_nr'
1223 * or 'nr' that mean the syscall number. It is needless here.
1224 * So drop '__syscall_nr' or 'nr' field but does not exist on older kernels.
1226 if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
1227 sc->args = sc->args->next;
/* exit/exit_group never return, so no sys_exit pairing is expected. */
1231 sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
1233 return syscall__set_arg_fmts(sc);
/*
 * Translate the user-supplied -e syscall-name qualifier list into an
 * array of syscall ids (trace->ev_qualifier_ids); on any unknown name
 * print a combined error listing the offenders, free the array and fail.
 */
1236 static int trace__validate_ev_qualifier(struct trace *trace)
1239 struct str_node *pos;
1241 trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
1242 trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
1243 sizeof(trace->ev_qualifier_ids.entries[0]));
1245 if (trace->ev_qualifier_ids.entries == NULL) {
1246 fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
1254 strlist__for_each_entry(pos, trace->ev_qualifier) {
1255 const char *sc = pos->s;
1256 int id = syscalltbl__id(trace->sctbl, sc);
/* Unknown names are reported together, comma separated. */
1260 fputs("Error:\tInvalid syscall ", trace->output);
1263 fputs(", ", trace->output);
1266 fputs(sc, trace->output);
1269 trace->ev_qualifier_ids.entries[i++] = id;
1273 fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
1274 "\nHint:\tand: 'man syscalls'\n", trace->output);
/* On error, discard the partially filled id array. */
1275 zfree(&trace->ev_qualifier_ids.entries);
1276 trace->ev_qualifier_ids.nr = 0;
1283 * args is to be interpreted as a series of longs but we need to handle
1284 * 8-byte unaligned accesses. args points to raw_data within the event
1285 * and raw_data is guaranteed to be 8-byte unaligned because it is
1286 * preceded by raw_size which is a u32. So we need to copy args to a temp
1287 * variable to read it. Most notably this avoids extended load instructions
1288 * on unaligned addresses
/*
 * Format the syscall's arguments into bf (at most size bytes), using the
 * per-argument formatters set up by syscall__set_arg_fmts().  Falls back
 * to printing raw "argN: value" pairs when the tracepoint format file
 * could not be read.  Returns the number of characters printed.
 */
1291 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
1292 unsigned char *args, struct trace *trace,
1293 struct thread *thread)
1299 if (sc->args != NULL) {
1300 struct format_field *field;
1302 struct syscall_arg arg = {
1309 for (field = sc->args; field;
1310 field = field->next, ++arg.idx, bit <<= 1) {
1314 /* special care for unaligned accesses */
1315 p = args + sizeof(unsigned long) * arg.idx;
1316 memcpy(&val, p, sizeof(val));
1319 * Suppress this argument if its value is zero and
1320 * and we don't have a string associated in an
/* ... except STRARRAY args, where 0 may map to a named value. */
1324 !(sc->arg_scnprintf &&
1325 sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
1326 sc->arg_parm[arg.idx]))
1329 printed += scnprintf(bf + printed, size - printed,
1330 "%s%s: ", printed ? ", " : "", field->name);
/* Use the dedicated formatter when one was assigned ... */
1331 if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
1334 arg.parm = sc->arg_parm[arg.idx];
1335 printed += sc->arg_scnprintf[arg.idx](bf + printed,
1336 size - printed, &arg);
/* ... otherwise print the raw value. */
1338 printed += scnprintf(bf + printed, size - printed,
1342 } else if (IS_ERR(sc->tp_format)) {
1344 * If we managed to read the tracepoint /format file, then we
1345 * may end up not having any args, like with gettid(), so only
1346 * print the raw args when we didn't manage to read it.
1351 /* special care for unaligned accesses */
1352 p = args + sizeof(unsigned long) * i;
1353 memcpy(&val, p, sizeof(val));
1354 printed += scnprintf(bf + printed, size - printed,
1356 printed ? ", " : "", i, val);
/*
 * Signature shared by all per-tracepoint sample handlers (trace__sys_enter,
 * trace__sys_exit, trace__vfs_getname, ...), dispatched from
 * trace__handle_event() / trace__process_sample() via evsel->handler.
 */
1364 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
1365 union perf_event *event,
1366 struct perf_sample *sample);
/*
 * Return the struct syscall descriptor for the given id, reading it in
 * on first use via trace__read_syscall_info(); NULL (with a diagnostic
 * on trace->output) when the id is invalid or its info can't be read.
 */
1368 static struct syscall *trace__syscall_info(struct trace *trace,
1369 struct perf_evsel *evsel, int id)
1375 * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
1376 * before that, leaving at a higher verbosity level till that is
1377 * explained. Reproduced with plain ftrace with:
1379 * echo 1 > /t/events/raw_syscalls/sys_exit/enable
1380 * grep "NR -1 " /t/trace_pipe
1382 * After generating some load on the machine.
1386 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
1387 id, perf_evsel__name(evsel), ++n);
/* Lazily load the syscall's tracepoint format on first encounter. */
1392 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
1393 trace__read_syscall_info(trace, id))
1396 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
1399 return &trace->syscalls.table[id];
1403 fprintf(trace->output, "Problems reading syscall %d", id);
1404 if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
1405 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
1406 fputs(" information\n", trace->output);
/*
 * Accumulate per-thread, per-syscall duration statistics (used by the
 * --summary output): find-or-create the stats node for this syscall id
 * and feed it the enter->exit latency of the current invocation.
 */
1411 static void thread__update_stats(struct thread_trace *ttrace,
1412 int id, struct perf_sample *sample)
1414 struct int_node *inode;
1415 struct stats *stats;
1418 inode = intlist__findnew(ttrace->syscall_stats, id);
/* First sample for this syscall id: allocate its stats lazily. */
1422 stats = inode->priv;
1423 if (stats == NULL) {
1424 stats = malloc(sizeof(struct stats));
1428 inode->priv = stats;
/* Only compute a duration when we saw the matching sys_enter. */
1431 if (ttrace->entry_time && sample->time > ttrace->entry_time)
1432 duration = sample->time - ttrace->entry_time;
1434 update_stats(stats, duration);
/*
 * When another thread's event arrives while a syscall entry line is
 * still pending for trace->current, flush that half-printed entry with
 * a trailing "...)" marker so output lines don't interleave.
 */
1437 static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
1439 struct thread_trace *ttrace;
1443 if (trace->current == NULL)
1446 ttrace = thread__priv(trace->current);
1448 if (!ttrace->entry_pending)
1451 duration = sample->time - ttrace->entry_time;
1453 printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
1454 printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
1455 ttrace->entry_pending = false;
/*
 * Handler for raw_syscalls:sys_enter: format the syscall name and
 * arguments into the per-thread entry_str buffer.  The line is printed
 * immediately only when no duration filter/summary/min-stack option is
 * in effect; otherwise it stays pending until the matching sys_exit.
 */
1460 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
1461 union perf_event *event __maybe_unused,
1462 struct perf_sample *sample)
1467 struct thread *thread;
1468 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
1469 struct syscall *sc = trace__syscall_info(trace, evsel, id);
1470 struct thread_trace *ttrace;
1475 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1476 ttrace = thread__trace(thread, trace->output);
1480 args = perf_evsel__sc_tp_ptr(evsel, args, sample);
/* Lazily allocate the per-thread formatted-entry buffer. */
1482 if (ttrace->entry_str == NULL) {
1483 ttrace->entry_str = malloc(trace__entry_str_size);
1484 if (!ttrace->entry_str)
1488 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
1489 trace__printf_interrupted_entry(trace, sample);
1491 ttrace->entry_time = sample->time;
1492 msg = ttrace->entry_str;
1493 printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
1495 printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
1496 args, trace, thread);
/* NOTE(review): the elided condition here presumably handles sc->is_exit
 * (syscalls that never return) vs. the deferred-print case — confirm
 * against the full source. */
1499 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
1500 trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
1501 fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
1504 ttrace->entry_pending = true;
1505 /* See trace__vfs_getname & trace__sys_exit */
1506 ttrace->filename.pending_open = false;
/* Track the most recently seen thread for interrupted-entry handling. */
1509 if (trace->current != thread) {
1510 thread__put(trace->current);
1511 trace->current = thread__get(thread);
1515 thread__put(thread);
/*
 * Resolve the sample's callchain into the given cursor, bounded by
 * trace->max_stack; non-zero on resolution failure.
 */
1519 static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
1520 struct perf_sample *sample,
1521 struct callchain_cursor *cursor)
1523 struct addr_location al;
1525 if (machine__resolve(trace->host, &al, sample) < 0 ||
1526 thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
/*
 * Print the previously resolved callchain (callchain_cursor) to
 * trace->output, one symbolized frame per line.
 */
1532 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
1534 /* TODO: user-configurable print_opts */
1535 const unsigned int print_opts = EVSEL__PRINT_SYM |
1537 EVSEL__PRINT_UNKNOWN_AS_ADDR;
/* 38 is the left-margin indent used for callchain lines. */
1539 return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
/*
 * Handler for raw_syscalls:sys_exit: pair with the pending sys_enter for
 * this thread, compute the duration, apply the duration filter, update
 * --summary stats, and print the completed "name(args) = ret" line with
 * errno decoding, special-cased return formatting and an optional
 * callchain.
 */
1542 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
1543 union perf_event *event __maybe_unused,
1544 struct perf_sample *sample)
1548 struct thread *thread;
1549 int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
1550 struct syscall *sc = trace__syscall_info(trace, evsel, id);
1551 struct thread_trace *ttrace;
1556 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1557 ttrace = thread__trace(thread, trace->output);
1562 thread__update_stats(ttrace, id, sample);
1564 ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
/* A successful open-family return pairs with the vfs_getname filename:
 * associate it with the returned fd (see trace__vfs_getname). */
1566 if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
1567 trace__set_fd_pathname(thread, ret, ttrace->filename.name);
1568 ttrace->filename.pending_open = false;
1569 ++trace->stats.vfs_getname;
1572 if (ttrace->entry_time) {
1573 duration = sample->time - ttrace->entry_time;
1574 if (trace__filter_duration(trace, duration))
1576 } else if (trace->duration_filter)
1579 if (sample->callchain) {
1580 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
/* Skip samples whose stack is shallower than --min-stack. */
1581 if (callchain_ret == 0) {
1582 if (callchain_cursor.nr < trace->min_stack)
1588 if (trace->summary_only)
1591 trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
/* Either finish the pending entry line or mark this as a continuation
 * of an entry that was flushed by trace__printf_interrupted_entry(). */
1593 if (ttrace->entry_pending) {
1594 fprintf(trace->output, "%-70s", ttrace->entry_str);
1596 fprintf(trace->output, " ... [");
1597 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
1598 fprintf(trace->output, "]: %s()", sc->name);
/* Return value formatting: plain, errno name+message, timeout, hex,
 * or pid (with the child's comm when known). */
1601 if (sc->fmt == NULL) {
1603 fprintf(trace->output, ") = %ld", ret);
1604 } else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
1605 char bf[STRERR_BUFSIZE];
1606 const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
1607 *e = audit_errno_to_name(-ret);
1609 fprintf(trace->output, ") = -1 %s %s", e, emsg);
1610 } else if (ret == 0 && sc->fmt->timeout)
1611 fprintf(trace->output, ") = 0 Timeout");
1612 else if (sc->fmt->hexret)
1613 fprintf(trace->output, ") = %#lx", ret);
1614 else if (sc->fmt->errpid) {
1615 struct thread *child = machine__find_thread(trace->host, ret, ret);
1617 if (child != NULL) {
1618 fprintf(trace->output, ") = %ld", ret);
1619 if (child->comm_set)
1620 fprintf(trace->output, " (%s)", thread__comm_str(child));
1626 fputc('\n', trace->output);
1628 if (callchain_ret > 0)
1629 trace__fprintf_callchain(trace, sample);
1630 else if (callchain_ret < 0)
1631 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1633 ttrace->entry_pending = false;
1636 thread__put(thread);
/*
 * Handler for the probe:vfs_getname probe: capture the pathname the
 * kernel resolved for an in-flight open, stash a copy in the thread's
 * filename state (consumed by trace__sys_exit), and splice it into the
 * already-formatted entry_str at the recorded pointer-argument position.
 */
1640 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
1641 union perf_event *event __maybe_unused,
1642 struct perf_sample *sample)
1644 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1645 struct thread_trace *ttrace;
1646 size_t filename_len, entry_str_len, to_move;
1647 ssize_t remaining_space;
1649 const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
1654 ttrace = thread__priv(thread);
1658 filename_len = strlen(filename);
1659 if (filename_len == 0)
/* Grow the per-thread filename buffer when the new path is longer. */
1662 if (ttrace->filename.namelen < filename_len) {
1663 char *f = realloc(ttrace->filename.name, filename_len + 1);
1668 ttrace->filename.namelen = filename_len;
1669 ttrace->filename.name = f;
1672 strcpy(ttrace->filename.name, filename);
1673 ttrace->filename.pending_open = true;
/* No pointer argument recorded in entry_str to substitute into. */
1675 if (!ttrace->filename.ptr)
1678 entry_str_len = strlen(ttrace->entry_str);
1679 remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
1680 if (remaining_space <= 0)
/* Keep the tail of the path when it doesn't fit whole. */
1683 if (filename_len > (size_t)remaining_space) {
1684 filename += filename_len - remaining_space;
1685 filename_len = remaining_space;
/* Open a gap at entry_str_pos and drop the filename in. */
1688 to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
1689 pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
1690 memmove(pos + filename_len, pos, to_move);
1691 memcpy(pos, filename, filename_len);
1693 ttrace->filename.ptr = 0;
1694 ttrace->filename.entry_str_pos = 0;
/*
 * Handler for sched:sched_stat_runtime: accumulate on-CPU runtime (ms)
 * per thread and globally for the summary; on failure to get the
 * thread's trace state, dump the raw event fields instead.
 */
1699 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
1700 union perf_event *event __maybe_unused,
1701 struct perf_sample *sample)
1703 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
1704 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
1705 struct thread *thread = machine__findnew_thread(trace->host,
1708 struct thread_trace *ttrace = thread__trace(thread, trace->output);
1713 ttrace->runtime_ms += runtime_ms;
1714 trace->runtime_ms += runtime_ms;
1715 thread__put(thread);
/* Fallback path (ttrace == NULL): print the raw tracepoint payload. */
1719 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
1721 perf_evsel__strval(evsel, sample, "comm"),
1722 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
1724 perf_evsel__intval(evsel, sample, "vruntime"));
1725 thread__put(thread);
/*
 * print_binary() callback for BPF output events: emit printable bytes
 * as-is and everything else as '.', ignoring layout/padding ops.
 */
1729 static void bpf_output__printer(enum binary_printer_ops op,
1730 unsigned int val, void *extra)
1732 FILE *output = extra;
1733 unsigned char ch = (unsigned char)val;
1736 case BINARY_PRINT_CHAR_DATA:
1737 fprintf(output, "%c", isprint(ch) ? ch : '.');
/* All structural ops are intentionally no-ops for this printer. */
1739 case BINARY_PRINT_DATA_BEGIN:
1740 case BINARY_PRINT_LINE_BEGIN:
1741 case BINARY_PRINT_ADDR:
1742 case BINARY_PRINT_NUM_DATA:
1743 case BINARY_PRINT_NUM_PAD:
1744 case BINARY_PRINT_SEP:
1745 case BINARY_PRINT_CHAR_PAD:
1746 case BINARY_PRINT_LINE_END:
1747 case BINARY_PRINT_DATA_END:
/*
 * Dump a BPF output event's raw payload to trace->output, 8 bytes per
 * line, using the character-only printer above.
 */
1753 static void bpf_output__fprintf(struct trace *trace,
1754 struct perf_sample *sample)
1756 print_binary(sample->raw_data, sample->raw_size, 8,
1757 bpf_output__printer, trace->output);
/*
 * Generic handler for events added via --event: print a timestamped
 * "evsel_name:" line with the tracepoint-formatted (or raw BPF) payload
 * and an optional callchain.
 */
1760 static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
1761 union perf_event *event __maybe_unused,
1762 struct perf_sample *sample)
1764 int callchain_ret = 0;
1766 if (sample->callchain) {
1767 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
/* Honour --min-stack for these events too. */
1768 if (callchain_ret == 0) {
1769 if (callchain_cursor.nr < trace->min_stack)
1775 trace__printf_interrupted_entry(trace, sample);
1776 trace__fprintf_tstamp(trace, sample->time, trace->output);
/* "( ): " keeps columns aligned with the syscall duration field. */
1778 if (trace->trace_syscalls)
1779 fprintf(trace->output, "( ): ");
1781 fprintf(trace->output, "%s:", evsel->name);
1783 if (perf_evsel__is_bpf_output(evsel)) {
1784 bpf_output__fprintf(trace, sample);
1785 } else if (evsel->tp_format) {
1786 event_format__fprintf(evsel->tp_format, sample->cpu,
1787 sample->raw_data, sample->raw_size,
1791 fprintf(trace->output, ")\n");
1793 if (callchain_ret > 0)
1794 trace__fprintf_callchain(trace, sample);
1795 else if (callchain_ret < 0)
1796 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
/*
 * Print a fault address location as "dso@symbol+offset" (pieces included
 * according to print_dso/print_sym and verbosity), falling back to the
 * resolved map address and finally the raw sample address.
 */
1801 static void print_location(FILE *f, struct perf_sample *sample,
1802 struct addr_location *al,
1803 bool print_dso, bool print_sym)
1806 if ((verbose > 0 || print_dso) && al->map)
1807 fprintf(f, "%s@", al->map->dso->long_name);
1809 if ((verbose > 0 || print_sym) && al->sym)
1810 fprintf(f, "%s+0x%" PRIx64, al->sym->name,
1811 al->addr - al->sym->start);
1813 fprintf(f, "0x%" PRIx64, al->addr);
1815 fprintf(f, "0x%" PRIx64, sample->addr);
/*
 * Handler for page-fault software events: print "majfault"/"minfault"
 * with the faulting IP location and the faulted data (or code) address,
 * plus an optional callchain.
 */
1818 static int trace__pgfault(struct trace *trace,
1819 struct perf_evsel *evsel,
1820 union perf_event *event __maybe_unused,
1821 struct perf_sample *sample)
1823 struct thread *thread;
1824 struct addr_location al;
/* 'd' = data map; switched when the fault resolves to a code address. */
1825 char map_type = 'd';
1826 struct thread_trace *ttrace;
1828 int callchain_ret = 0;
1830 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1832 if (sample->callchain) {
1833 callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
1834 if (callchain_ret == 0) {
1835 if (callchain_cursor.nr < trace->min_stack)
1841 ttrace = thread__trace(thread, trace->output);
/* Count major vs minor faults separately (elided increments). */
1845 if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
1850 if (trace->summary_only)
/* Resolve the faulting instruction pointer first... */
1853 thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
1856 trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
1858 fprintf(trace->output, "%sfault [",
1859 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
1862 print_location(trace->output, sample, &al, false, true);
1864 fprintf(trace->output, "] => ");
/* ...then the faulted address, trying the data map before code. */
1866 thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
1870 thread__find_addr_location(thread, sample->cpumode,
1871 MAP__FUNCTION, sample->addr, &al);
1879 print_location(trace->output, sample, &al, true, false);
1881 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
1883 if (callchain_ret > 0)
1884 trace__fprintf_callchain(trace, sample);
1885 else if (callchain_ret < 0)
1886 pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
1890 thread__put(thread);
/*
 * Record the first timestamp seen as the session base time (used for
 * relative timestamps unless --full-time was given).
 */
1894 static void trace__set_base_time(struct trace *trace,
1895 struct perf_evsel *evsel,
1896 struct perf_sample *sample)
1899 * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
1900 * and don't use sample->time unconditionally, we may end up having
1901 * some other event in the future without PERF_SAMPLE_TIME for good
1902 * reason, i.e. we may not be interested in its timestamps, just in
1903 * it taking place, picking some piece of information when it
1904 * appears in our event stream (vfs_getname comes to mind).
1906 if (trace->base_time == 0 && !trace->full_time &&
1907 (evsel->attr.sample_type & PERF_SAMPLE_TIME))
1908 trace->base_time = sample->time;
/*
 * perf_tool sample callback used in replay mode (perf trace -i): filter
 * out unwanted threads, maintain the base time, and dispatch to the
 * evsel's registered tracepoint handler.
 */
1911 static int trace__process_sample(struct perf_tool *tool,
1912 union perf_event *event,
1913 struct perf_sample *sample,
1914 struct perf_evsel *evsel,
1915 struct machine *machine __maybe_unused)
1917 struct trace *trace = container_of(tool, struct trace, tool);
1918 struct thread *thread;
1921 tracepoint_handler handler = evsel->handler;
1923 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
1924 if (thread && thread__is_filtered(thread))
1927 trace__set_base_time(trace, evsel, sample);
1931 handler(trace, evsel, event, sample);
/*
 * Implement 'perf trace record': build an argv for 'perf record' from a
 * fixed argument set plus the syscall/page-fault events implied by the
 * trace options, append the user's arguments, and hand off to
 * cmd_record().
 */
1937 static int trace__record(struct trace *trace, int argc, const char **argv)
1939 unsigned int rec_argc, i, j;
1940 const char **rec_argv;
1941 const char * const record_args[] = {
1948 const char * const sc_args[] = { "-e", };
1949 unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
1950 const char * const majpf_args[] = { "-e", "major-faults" };
1951 unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
1952 const char * const minpf_args[] = { "-e", "minor-faults" };
1953 unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
1955 /* +1 is for the event string below */
1956 rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
1957 majpf_args_nr + minpf_args_nr + argc;
1958 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1960 if (rec_argv == NULL)
1964 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1965 rec_argv[j++] = record_args[i];
1967 if (trace->trace_syscalls) {
1968 for (i = 0; i < sc_args_nr; i++)
1969 rec_argv[j++] = sc_args[i];
1971 /* event string may be different for older kernels - e.g., RHEL6 */
1972 if (is_valid_tracepoint("raw_syscalls:sys_enter"))
1973 rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
1974 else if (is_valid_tracepoint("syscalls:sys_enter"))
1975 rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
1977 pr_err("Neither raw_syscalls nor syscalls events exist.\n");
1982 if (trace->trace_pgfaults & TRACE_PFMAJ)
1983 for (i = 0; i < majpf_args_nr; i++)
1984 rec_argv[j++] = majpf_args[i];
1986 if (trace->trace_pgfaults & TRACE_PFMIN)
1987 for (i = 0; i < minpf_args_nr; i++)
1988 rec_argv[j++] = minpf_args[i];
/* Everything after 'perf trace record' is forwarded verbatim. */
1990 for (i = 0; i < (unsigned int)argc; i++)
1991 rec_argv[j++] = argv[i];
1993 return cmd_record(j, rec_argv, NULL);
/* Forward declaration: the --summary printer is defined later in the file. */
1996 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
/*
 * Try to add the optional probe:vfs_getname probe event (set up
 * separately with 'perf probe'); returns whether it was added.  A probe
 * without the expected "pathname" field is rejected and deleted.
 */
1998 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
2000 struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
2005 if (perf_evsel__field(evsel, "pathname") == NULL) {
2006 perf_evsel__delete(evsel);
2010 evsel->handler = trace__vfs_getname;
2011 perf_evlist__add(evlist, evsel);
/*
 * Create a software page-fault evsel (config is MAJ or MIN faults),
 * sampling every fault, with trace__pgfault as its handler.
 */
2015 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
2017 struct perf_evsel *evsel;
2018 struct perf_event_attr attr = {
2019 .type = PERF_TYPE_SOFTWARE,
2023 attr.config = config;
/* sample_period = 1: record every single fault, no sampling. */
2024 attr.sample_period = 1;
2026 event_attr_init(&attr);
2028 evsel = perf_evsel__new(&attr);
2030 evsel->handler = trace__pgfault;
/*
 * Live-mode dispatch for one mmap'ed event: non-sample records go to
 * trace__process_event(); samples are mapped to their evsel by id and
 * routed to the evsel's tracepoint handler, after sanity-checking that
 * tracepoint samples carry a payload.
 */
2035 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
2037 const u32 type = event->header.type;
2038 struct perf_evsel *evsel;
2040 if (type != PERF_RECORD_SAMPLE) {
2041 trace__process_event(trace, trace->host, event, sample);
2045 evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
2046 if (evsel == NULL) {
2047 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
2051 trace__set_base_time(trace, evsel, sample);
/* Defensive: a tracepoint sample with no raw payload can't be parsed. */
2053 if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
2054 sample->raw_data == NULL) {
2055 fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
2056 perf_evsel__name(evsel), sample->tid,
2057 sample->cpu, sample->raw_size);
2059 tracepoint_handler handler = evsel->handler;
2060 handler(trace, evsel, event, sample);
/*
 * Create and add the raw_syscalls:sys_enter/sys_exit tracepoint evsels,
 * initialize their "args"/"ret" field accessors and stash them in
 * trace->syscalls.events.  Uses goto cleanup to delete partially built
 * evsels on failure.
 */
2064 static int trace__add_syscall_newtp(struct trace *trace)
2067 struct perf_evlist *evlist = trace->evlist;
2068 struct perf_evsel *sys_enter, *sys_exit;
2070 sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
2071 if (sys_enter == NULL)
2074 if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
2075 goto out_delete_sys_enter;
2077 sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
2078 if (sys_exit == NULL)
2079 goto out_delete_sys_enter;
2081 if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
2082 goto out_delete_sys_exit;
2084 perf_evlist__add(evlist, sys_enter);
2085 perf_evlist__add(evlist, sys_exit);
2087 if (callchain_param.enabled && !trace->kernel_syscallchains) {
2089 * We're interested only in the user space callchain
2090 * leading to the syscall, allow overriding that for
2091 * debugging reasons using --kernel_syscall_callchains
2093 sys_exit->attr.exclude_callchain_kernel = 1;
2096 trace->syscalls.events.sys_enter = sys_enter;
2097 trace->syscalls.events.sys_exit = sys_exit;
2103 out_delete_sys_exit:
2104 perf_evsel__delete_priv(sys_exit);
2105 out_delete_sys_enter:
2106 perf_evsel__delete_priv(sys_enter);
/*
 * Build an "id ==/!= ..." tracepoint filter expression from the
 * validated event-qualifier id list (negated for --no-syscalls lists)
 * and append it to both the sys_enter and sys_exit evsels.
 */
2110 static int trace__set_ev_qualifier_filter(struct trace *trace)
2113 struct perf_evsel *sys_exit;
2114 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
2115 trace->ev_qualifier_ids.nr,
2116 trace->ev_qualifier_ids.entries);
2121 if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
2123 sys_exit = trace->syscalls.events.sys_exit;
2124 err = perf_evsel__append_tp_filter(sys_exit, filter);
/*
 * Main live-trace loop: assemble the evlist (syscall tracepoints,
 * vfs_getname, page faults, sched_stat_runtime), configure callchains
 * and filters, open/mmap the events, optionally fork the workload, then
 * poll the ring buffers dispatching events until interrupted, and
 * finally print the summary.  Error exits funnel through labelled
 * strerror-formatting paths at the bottom.
 */
2135 static int trace__run(struct trace *trace, int argc, const char **argv)
2137 struct perf_evlist *evlist = trace->evlist;
2138 struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
2140 unsigned long before;
2141 const bool forks = argc > 0;
2142 bool draining = false;
/* --- event set assembly --- */
2146 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
2147 goto out_error_raw_syscalls;
2149 if (trace->trace_syscalls)
2150 trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
2152 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
2153 pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
2154 if (pgfault_maj == NULL)
2156 perf_evlist__add(evlist, pgfault_maj);
2159 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
2160 pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
2161 if (pgfault_min == NULL)
2163 perf_evlist__add(evlist, pgfault_min);
2167 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
2168 trace__sched_stat_runtime))
2169 goto out_error_sched_stat_runtime;
2171 err = perf_evlist__create_maps(evlist, &trace->opts.target);
2173 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
2174 goto out_delete_evlist;
2177 err = trace__symbols_init(trace, evlist);
2179 fprintf(trace->output, "Problems initializing symbol libraries!\n");
2180 goto out_delete_evlist;
2183 perf_evlist__config(evlist, &trace->opts, NULL);
/* --- callchain configuration --- */
2185 if (callchain_param.enabled) {
2186 bool use_identifier = false;
2188 if (trace->syscalls.events.sys_exit) {
2189 perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
2190 &trace->opts, &callchain_param);
2191 use_identifier = true;
2195 perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
2196 use_identifier = true;
2200 perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
2201 use_identifier = true;
2204 if (use_identifier) {
2206 * Now we have evsels with different sample_ids, use
2207 * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
2208 * from a fixed position in each ring buffer record.
2210 * As of this the changeset introducing this comment, this
2211 * isn't strictly needed, as the fields that can come before
2212 * PERF_SAMPLE_ID are all used, but we'll probably disable
2213 * some of those for things like copying the payload of
2214 * pointer syscall arguments, and for vfs_getname we don't
2215 * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
2216 * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
2218 perf_evlist__set_sample_bit(evlist, IDENTIFIER);
2219 perf_evlist__reset_sample_bit(evlist, ID);
2223 signal(SIGCHLD, sig_handler);
2224 signal(SIGINT, sig_handler);
/* --- workload preparation and event opening --- */
2227 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
2230 fprintf(trace->output, "Couldn't run the workload!\n");
2231 goto out_delete_evlist;
2235 err = perf_evlist__open(evlist);
2237 goto out_error_open;
2239 err = bpf__apply_obj_config();
2241 char errbuf[BUFSIZ];
2243 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
2244 pr_err("ERROR: Apply config to BPF failed: %s\n",
2246 goto out_error_open;
2250 * Better not use !target__has_task() here because we need to cover the
2251 * case where no threads were specified in the command line, but a
2252 * workload was, and in that case we will fill in the thread_map when
2253 * we fork the workload in perf_evlist__prepare_workload.
2255 if (trace->filter_pids.nr > 0)
2256 err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
2257 else if (thread_map__pid(evlist->threads, 0) == -1)
2258 err = perf_evlist__set_filter_pid(evlist, getpid());
2263 if (trace->ev_qualifier_ids.nr > 0) {
2264 err = trace__set_ev_qualifier_filter(trace);
2268 pr_debug("event qualifier tracepoint filter: %s\n",
2269 trace->syscalls.events.sys_exit->filter);
2272 err = perf_evlist__apply_filters(evlist, &evsel);
2274 goto out_error_apply_filters;
2276 err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
2278 goto out_error_mmap;
/* With --delay, events are enabled only after the workload starts. */
2280 if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
2281 perf_evlist__enable(evlist);
2284 perf_evlist__start_workload(evlist);
2286 if (trace->opts.initial_delay) {
2287 usleep(trace->opts.initial_delay * 1000);
2288 perf_evlist__enable(evlist);
/* tid is printed per line only when more than one thread may appear. */
2291 trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
2292 evlist->threads->nr > 1 ||
2293 perf_evlist__first(evlist)->attr.inherit;
/* --- main event loop --- */
2295 before = trace->nr_events;
2297 for (i = 0; i < evlist->nr_mmaps; i++) {
2298 union perf_event *event;
2300 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
2301 struct perf_sample sample;
2305 err = perf_evlist__parse_sample(evlist, event, &sample);
2307 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
2311 trace__handle_event(trace, event, &sample);
2313 perf_evlist__mmap_consume(evlist, i);
/* Once 'done' is set (signal), disable events and drain the buffers. */
2318 if (done && !draining) {
2319 perf_evlist__disable(evlist);
/* Nothing new arrived this pass: poll (bounded when finishing). */
2325 if (trace->nr_events == before) {
2326 int timeout = done ? 100 : -1;
2328 if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
2329 if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
2339 thread__zput(trace->current);
2341 perf_evlist__disable(evlist);
/* --- teardown and summary --- */
2345 trace__fprintf_thread_summary(trace, trace->output);
2347 if (trace->show_tool_stats) {
2348 fprintf(trace->output, "Stats:\n "
2349 " vfs_getname : %" PRIu64 "\n"
2350 " proc_getname: %" PRIu64 "\n",
2351 trace->stats.vfs_getname,
2352 trace->stats.proc_getname);
2357 perf_evlist__delete(evlist);
2358 trace->evlist = NULL;
2359 trace->live = false;
/* --- labelled error paths: format errno and fall into the common print --- */
2362 char errbuf[BUFSIZ];
2364 out_error_sched_stat_runtime:
2365 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
2368 out_error_raw_syscalls:
2369 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
2373 perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
2377 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
2380 fprintf(trace->output, "%s\n", errbuf);
2381 goto out_delete_evlist;
2383 out_error_apply_filters:
2384 fprintf(trace->output,
2385 "Failed to set filter \"%s\" on event %s with %d (%s)\n",
2386 evsel->filter, perf_evsel__name(evsel), errno,
2387 str_error_r(errno, errbuf, sizeof(errbuf)));
2388 goto out_delete_evlist;
2391 fprintf(trace->output, "Not enough memory to run!\n");
2392 goto out_delete_evlist;
2395 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
2396 goto out_delete_evlist;
/*
 * Replay mode (perf trace -i perf.data): set up the perf_tool callbacks
 * for ordered processing, open the session, wire the recorded
 * syscall/page-fault events to the same handlers used live, and process
 * all events from the file.
 */
2399 static int trace__replay(struct trace *trace)
2401 const struct perf_evsel_str_handler handlers[] = {
2402 { "probe:vfs_getname", trace__vfs_getname, },
2404 struct perf_data_file file = {
2406 .mode = PERF_DATA_MODE_READ,
2407 .force = trace->force,
2409 struct perf_session *session;
2410 struct perf_evsel *evsel;
2413 trace->tool.sample = trace__process_sample;
2414 trace->tool.mmap = perf_event__process_mmap;
2415 trace->tool.mmap2 = perf_event__process_mmap2;
2416 trace->tool.comm = perf_event__process_comm;
2417 trace->tool.exit = perf_event__process_exit;
2418 trace->tool.fork = perf_event__process_fork;
2419 trace->tool.attr = perf_event__process_attr;
2420 trace->tool.tracing_data = perf_event__process_tracing_data;
2421 trace->tool.build_id = perf_event__process_build_id;
2422 trace->tool.namespaces = perf_event__process_namespaces;
/* Samples must be time-ordered so enter/exit pairing works. */
2424 trace->tool.ordered_events = true;
2425 trace->tool.ordering_requires_timestamps = true;
2427 /* add tid to output */
2428 trace->multiple_threads = true;
2430 session = perf_session__new(&file, false, &trace->tool);
2431 if (session == NULL)
2434 if (trace->opts.target.pid)
2435 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
2437 if (trace->opts.target.tid)
2438 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
2440 if (symbol__init(&session->header.env) < 0)
2443 trace->host = &session->machines.host;
2445 err = perf_session__set_tracepoints_handlers(session, handlers);
2449 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2450 "raw_syscalls:sys_enter");
2451 /* older kernels have syscalls tp versus raw_syscalls */
2453 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2454 "syscalls:sys_enter");
2457 (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
2458 perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2459 pr_err("Error during initialize raw_syscalls:sys_enter event\n");
2463 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2464 "raw_syscalls:sys_exit");
2466 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
2467 "syscalls:sys_exit");
2469 (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
2470 perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2471 pr_err("Error during initialize raw_syscalls:sys_exit event\n");
/* Route any recorded page-fault software events to trace__pgfault. */
2475 evlist__for_each_entry(session->evlist, evsel) {
2476 if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
2477 (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
2478 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
2479 evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
2480 evsel->handler = trace__pgfault;
2485 err = perf_session__process_events(session);
2487 pr_err("Failed to process events, error %d", err);
2489 else if (trace->summary)
2490 trace__fprintf_thread_summary(trace, trace->output);
2493 perf_session__delete(session);
/* Print the banner that precedes the per-thread event summary. */
2498 static size_t trace__fprintf_threads_header(FILE *fp)
2502 printed = fprintf(fp, "\n Summary of events:\n\n");
/*
 * Re-sortable view of a thread's per-syscall stats intlist, ordered by
 * total time (msecs, descending) so the costliest syscalls print first.
 */
2507 DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
2508 struct stats *stats;
/* Fill one resort entry from an intlist node keyed by syscall id. */
2513 struct int_node *source = rb_entry(nd, struct int_node, rb_node);
2514 struct stats *stats = source->priv;
2516 entry->syscall = source->i;
2517 entry->stats = stats;
/* total msecs = nr of calls * avg call duration (stats kept in ns) */
2518 entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
/*
 * Print one thread's per-syscall statistics table (calls, total/min/avg/max
 * time and relative stddev), ordered by total time spent in each syscall.
 */
2521 static size_t thread__dump_stats(struct thread_trace *ttrace,
2522 struct trace *trace, FILE *fp)
2527 DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
2529 if (syscall_stats == NULL)
2532 printed += fprintf(fp, "\n");
2534 printed += fprintf(fp, " syscall calls total min avg max stddev\n");
2535 printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
2536 printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
2538 resort_rb__for_each_entry(nd, syscall_stats) {
2539 struct stats *stats = syscall_stats_entry->stats;
/* Stats are accumulated in nanoseconds; convert to msec for display. */
2541 double min = (double)(stats->min) / NSEC_PER_MSEC;
2542 double max = (double)(stats->max) / NSEC_PER_MSEC;
2543 double avg = avg_stats(stats);
2545 u64 n = (u64) stats->n;
/* relative stddev in percent; guard against division by zero */
2547 pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
2548 avg /= NSEC_PER_MSEC;
2550 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
2551 printed += fprintf(fp, " %-15s", sc->name);
2552 printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
2553 n, syscall_stats_entry->msecs, min, avg);
2554 printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
2558 resort_rb__delete(syscall_stats);
2559 printed += fprintf(fp, "\n\n");
/*
 * Print one thread's summary line: comm/tid, event count and its share of
 * all events, page fault counts, accumulated runtime, then the per-syscall
 * stats table for that thread.
 */
2564 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
2567 struct thread_trace *ttrace = thread__priv(thread);
/* this thread's share of all events seen, in percent */
2573 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
2575 printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
2576 printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
2577 printed += fprintf(fp, "%.1f%%", ratio);
2579 printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
2581 printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
2583 printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
2584 else if (fputc('\n', fp) != EOF)
2587 printed += thread__dump_stats(ttrace, trace, fp);
/* Event count for a thread; tolerates threads without a thread_trace (NULL). */
2592 static unsigned long thread__nr_events(struct thread_trace *ttrace)
2594 return ttrace ? ttrace->nr_events : 0;
/*
 * Re-sortable view of the host machine's threads, ordered by number of
 * events, for the end-of-run summary.
 */
2597 DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
2598 struct thread *thread;
/* Each resort entry just carries the thread backing the rb_node. */
2601 entry->thread = rb_entry(nd, struct thread, rb_node);
/* Print the whole per-thread summary, threads sorted by event count. */
2604 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
2606 DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
2607 size_t printed = trace__fprintf_threads_header(fp);
2610 if (threads == NULL) {
2611 fprintf(fp, "%s", "Error sorting output by nr_events!\n");
2615 resort_rb__for_each_entry(nd, threads)
2616 printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
2618 resort_rb__delete(threads);
/*
 * --duration option callback: only show events lasting more than N.M ms.
 * NOTE(review): atof() silently yields 0.0 for unparseable input; strtod()
 * with endptr/errno checking would reject bad values - confirm intent.
 */
2623 static int trace__set_duration(const struct option *opt, const char *str,
2624 int unset __maybe_unused)
2626 struct trace *trace = opt->value;
2628 trace->duration_filter = atof(str);
/*
 * --filter-pids option callback: parse a CSV list of pids into
 * trace->filter_pids.  Slot 0 is reserved for our own pid so that
 * perf trace does not trace itself.
 */
2632 static int trace__set_filter_pids(const struct option *opt, const char *str,
2633 int unset __maybe_unused)
2637 struct trace *trace = opt->value;
2639 * FIXME: introduce a intarray class, plain parse csv and create a
2640 * { int nr, int entries[] } struct...
2642 struct intlist *list = intlist__new(str);
/* +1 entry for our own pid in slot 0 */
2647 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
2648 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
2650 if (trace->filter_pids.entries == NULL)
2653 trace->filter_pids.entries[0] = getpid();
2655 for (i = 1; i < trace->filter_pids.nr; ++i)
2656 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
2658 intlist__delete(list);
/*
 * Open the -o output file for writing, first rotating any existing
 * non-empty file of that name to "<name>.old".
 * Returns 0 on success or -errno from the failed fopen().
 * NOTE(review): the rename() return value is ignored - rotation is
 * best-effort; confirm that is intentional.
 */
2664 static int trace__open_output(struct trace *trace, const char *filename)
2668 if (!stat(filename, &st) && st.st_size) {
2669 char oldname[PATH_MAX];
2671 scnprintf(oldname, sizeof(oldname), "%s.old", filename);
2673 rename(filename, oldname);
2676 trace->output = fopen(filename, "w");
2678 return trace->output == NULL ? -errno : 0;
/*
 * -F/--pf option callback: accumulate which page fault classes to trace
 * ("all", "maj" or "min") into the TRACE_PFMAJ/TRACE_PFMIN bitmask.
 */
2681 static int parse_pagefaults(const struct option *opt, const char *str,
2682 int unset __maybe_unused)
2684 int *trace_pgfaults = opt->value;
2686 if (strcmp(str, "all") == 0)
2687 *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
2688 else if (strcmp(str, "maj") == 0)
2689 *trace_pgfaults |= TRACE_PFMAJ;
2690 else if (strcmp(str, "min") == 0)
2691 *trace_pgfaults |= TRACE_PFMIN;
/* Point every evsel in the list at the same sample handler. */
2698 static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
2700 struct perf_evsel *evsel;
2702 evlist__for_each_entry(evlist, evsel)
2703 evsel->handler = handler;
2707 * XXX: Hackish, just splitting the combined -e+--event (syscalls
2708 * (raw_syscalls:{sys_{enter,exit}}) + events (tracepoints, HW, SW, etc) to use
2709 * existing facilities unchanged (trace->ev_qualifier + parse_options()).
2711 * It'd be better to introduce a parse_options() variant that would return a
2712 * list with the terms it didn't match to an event...
/*
 * -e/--event option callback: split the combined argument into two comma
 * separated lists: lists[1] collects syscall names and readable strace
 * group files (becomes trace->ev_qualifier), lists[0] collects everything
 * else and is handed to the stock parse_events_option().
 */
2714 static int trace__parse_events_option(const struct option *opt, const char *str,
2715 int unset __maybe_unused)
2717 struct trace *trace = (struct trace *)opt->value;
2718 const char *s = str;
2719 char *sep = NULL, *lists[2] = { NULL, NULL, };
2720 int len = strlen(str), err = -1, list;
2721 char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
2722 char group_name[PATH_MAX];
2724 if (strace_groups_dir == NULL)
/* a leading '!' negates the whole syscall qualifier list */
2729 trace->not_ev_qualifier = true;
/* walk the comma separated terms, splitting in place at each ',' */
2733 if ((sep = strchr(s, ',')) != NULL)
/* known syscall name or readable strace group file -> qualifier list */
2737 if (syscalltbl__id(trace->sctbl, s) >= 0) {
2740 path__join(group_name, sizeof(group_name), strace_groups_dir, s);
2741 if (access(group_name, R_OK) == 0)
2746 sprintf(lists[list] + strlen(lists[list]), ",%s", s);
/*
 * NOTE(review): len == strlen(str) does not count the NUL terminator;
 * when several terms end up appended into the same list the sprintf
 * above can overrun this buffer by one byte - looks like it should be
 * malloc(len + 1).  Confirm before changing.
 */
2748 lists[list] = malloc(len);
2749 if (lists[list] == NULL)
2751 strcpy(lists[list], s);
/* Build the syscall qualifier strlist, resolving group files in dirname. */
2761 if (lists[1] != NULL) {
2762 struct strlist_config slist_config = {
2763 .dirname = strace_groups_dir,
2766 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
2767 if (trace->ev_qualifier == NULL) {
2768 fputs("Not enough memory to parse event qualifier", trace->output);
2772 if (trace__validate_ev_qualifier(trace))
/* Everything that wasn't a syscall goes through the stock event parser. */
2779 struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
2780 "event selector. use 'perf list' to list available events",
2781 parse_events_option);
2782 err = parse_events_option(&o, lists[0], 0);
2791 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
2793 const char *trace_usage[] = {
2794 "perf trace [<options>] [<command>]",
2795 "perf trace [<options>] -- <command> [<options>]",
2796 "perf trace record [<options>] [<command>]",
2797 "perf trace record [<options>] -- <command> [<options>]",
2800 struct trace trace = {
2809 .user_freq = UINT_MAX,
2810 .user_interval = ULLONG_MAX,
2811 .no_buffering = true,
2812 .mmap_pages = UINT_MAX,
2813 .proc_map_timeout = 500,
2817 .trace_syscalls = true,
2818 .kernel_syscallchains = false,
2819 .max_stack = UINT_MAX,
2821 const char *output_name = NULL;
2822 const struct option trace_options[] = {
2823 OPT_CALLBACK('e', "event", &trace, "event",
2824 "event/syscall selector. use 'perf list' to list available events",
2825 trace__parse_events_option),
2826 OPT_BOOLEAN(0, "comm", &trace.show_comm,
2827 "show the thread COMM next to its id"),
2828 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
2829 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
2830 trace__parse_events_option),
2831 OPT_STRING('o', "output", &output_name, "file", "output file name"),
2832 OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
2833 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
2834 "trace events on existing process id"),
2835 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
2836 "trace events on existing thread id"),
2837 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
2838 "pids to filter (by the kernel)", trace__set_filter_pids),
2839 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
2840 "system-wide collection from all CPUs"),
2841 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
2842 "list of cpus to monitor"),
2843 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
2844 "child tasks do not inherit counters"),
2845 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
2846 "number of mmap data pages",
2847 perf_evlist__parse_mmap_pages),
2848 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
2850 OPT_CALLBACK(0, "duration", &trace, "float",
2851 "show only events with duration > N.M ms",
2852 trace__set_duration),
2853 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
2854 OPT_INCR('v', "verbose", &verbose, "be more verbose"),
2855 OPT_BOOLEAN('T', "time", &trace.full_time,
2856 "Show full timestamp, not time relative to first start"),
2857 OPT_BOOLEAN('s', "summary", &trace.summary_only,
2858 "Show only syscall summary with statistics"),
2859 OPT_BOOLEAN('S', "with-summary", &trace.summary,
2860 "Show all syscalls and summary with statistics"),
2861 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
2862 "Trace pagefaults", parse_pagefaults, "maj"),
2863 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
2864 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
2865 OPT_CALLBACK(0, "call-graph", &trace.opts,
2866 "record_mode[,record_size]", record_callchain_help,
2867 &record_parse_callchain_opt),
2868 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
2869 "Show the kernel callchains on the syscall exit path"),
2870 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
2871 "Set the minimum stack depth when parsing the callchain, "
2872 "anything below the specified depth will be ignored."),
2873 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
2874 "Set the maximum stack depth when parsing the callchain, "
2875 "anything beyond the specified depth will be ignored. "
2876 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
2877 OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
2878 "per thread proc mmap processing timeout in ms"),
2879 OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
2880 "ms to wait before starting measurement after program "
2884 bool __maybe_unused max_stack_user_set = true;
2885 bool mmap_pages_user_set = true;
2886 const char * const trace_subcommands[] = { "record", NULL };
2890 signal(SIGSEGV, sighandler_dump_stack);
2891 signal(SIGFPE, sighandler_dump_stack);
2893 trace.evlist = perf_evlist__new();
2894 trace.sctbl = syscalltbl__new();
2896 if (trace.evlist == NULL || trace.sctbl == NULL) {
2897 pr_err("Not enough memory to run!\n");
2902 argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
2903 trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2905 err = bpf__setup_stdout(trace.evlist);
2907 bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
2908 pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
2914 if (trace.trace_pgfaults) {
2915 trace.opts.sample_address = true;
2916 trace.opts.sample_time = true;
2919 if (trace.opts.mmap_pages == UINT_MAX)
2920 mmap_pages_user_set = false;
2922 if (trace.max_stack == UINT_MAX) {
2923 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
2924 max_stack_user_set = false;
2927 #ifdef HAVE_DWARF_UNWIND_SUPPORT
2928 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
2929 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
2932 if (callchain_param.enabled) {
2933 if (!mmap_pages_user_set && geteuid() == 0)
2934 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
2936 symbol_conf.use_callchain = true;
2939 if (trace.evlist->nr_entries > 0)
2940 evlist__set_evsel_handler(trace.evlist, trace__event_handler);
2942 if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
2943 return trace__record(&trace, argc-1, &argv[1]);
2945 /* summary_only implies summary option, but don't overwrite summary if set */
2946 if (trace.summary_only)
2947 trace.summary = trace.summary_only;
2949 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
2950 trace.evlist->nr_entries == 0 /* Was --events used? */) {
2951 pr_err("Please specify something to trace.\n");
2955 if (!trace.trace_syscalls && trace.ev_qualifier) {
2956 pr_err("The -e option can't be used with --no-syscalls.\n");
2960 if (output_name != NULL) {
2961 err = trace__open_output(&trace, output_name);
2963 perror("failed to create output file");
2968 trace.open_id = syscalltbl__id(trace.sctbl, "open");
2970 err = target__validate(&trace.opts.target);
2972 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2973 fprintf(trace.output, "%s", bf);
2977 err = target__parse_uid(&trace.opts.target);
2979 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
2980 fprintf(trace.output, "%s", bf);
2984 if (!argc && target__none(&trace.opts.target))
2985 trace.opts.target.system_wide = true;
2988 err = trace__replay(&trace);
2990 err = trace__run(&trace, argc, argv);
2993 if (output_name != NULL)
2994 fclose(trace.output);