4 * Builtin stat command: Give a precise performance counters summary
5 * overview about any workload, CPU or specific PID.
9 $ perf stat ~/hackbench 10
12 Performance counter stats for '/home/mingo/hackbench':
14 1255.538611 task clock ticks # 10.143 CPU utilization factor
15 54011 context switches # 0.043 M/sec
16 385 CPU migrations # 0.000 M/sec
17 17755 pagefaults # 0.014 M/sec
18 3808323185 CPU cycles # 3033.219 M/sec
19 1575111190 instructions # 1254.530 M/sec
20 17367895 cache references # 13.833 M/sec
21 7674421 cache misses # 6.112 M/sec
23 Wall-clock time elapsed: 123.786620 msecs
26 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
28 * Improvements and fixes by:
30 * Arjan van de Ven <arjan@linux.intel.com>
31 * Yanmin Zhang <yanmin.zhang@intel.com>
32 * Wu Fengguang <fengguang.wu@intel.com>
33 * Mike Galbraith <efault@gmx.de>
34 * Paul Mackerras <paulus@samba.org>
35 * Jaswinder Singh Rajput <jaswinder@kernel.org>
37 * Released under the GPL v2. (and only v2, not any later version)
42 #include "util/util.h"
43 #include "util/parse-options.h"
44 #include "util/parse-events.h"
45 #include "util/event.h"
46 #include "util/evsel.h"
47 #include "util/debug.h"
48 #include "util/header.h"
49 #include "util/cpumap.h"
50 #include "util/thread.h"
52 #include <sys/prctl.h>
56 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
58 #define DEFAULT_SEPARATOR " "
/*
 * Default counter set, used when the user selects no events (-e) and
 * did not pass --null: software task-clock/context-switch/migration/
 * page-fault events plus the generic hardware cycle, instruction,
 * branch and cache counters.
 * NOTE(review): this view of the file is sampled; the closing brace
 * of the array is outside the visible lines.
 */
60 static struct perf_event_attr default_attrs[] = {
62 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
63 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
64 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
65 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
67 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
68 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
69 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
70 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
71 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
72 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },
/* ---- behaviour flags and measurement targets, set by option parsing ---- */
76 static bool system_wide = false;
77 static int nr_cpus = 0;
/* index of the current --repeat iteration (see cmd_stat loop) */
78 static int run_idx = 0;
/* number of --repeat iterations; 1 means no averaging */
80 static int run_count = 1;
81 static bool no_inherit = false;
82 static bool scale = true;
/* -A: print one line per CPU instead of the system-wide aggregate */
83 static bool no_aggr = false;
84 static pid_t target_pid = -1;
85 static pid_t target_tid = -1;
/* all threads of target_pid, or a single entry for target_tid/forked child */
86 static pid_t *all_tids = NULL;
87 static int thread_num = 0;
88 static pid_t child_pid = -1;
89 static bool null_run = false;
/* big_num: use %' thousands grouping; big_num_opt records explicit -B use */
90 static bool big_num = true;
91 static int big_num_opt = -1;
92 static const char *cpu_list;
93 static const char *csv_sep = NULL;
94 static bool csv_output = false;
/* ends the system-wide sleep loop in run_perf_stat(); presumably set
 * from a signal handler — the setter is not visible in this view */
102 static volatile int done = 0;
/* per-run stats of [0]=count, [1]=time_enabled, [2]=time_running
 * (indices match the read_format triple read in read_counter_aggr) */
110 struct stats res_stats[3];
/* flexible array member: one entry per CPU, sized in
 * perf_evsel__alloc_stat_priv() */
112 struct cpu_counts cpu_counts[];
/*
 * Allocate evsel->priv as one zeroed chunk: a struct perf_stat followed
 * by its trailing cpu_counts[] flexible array, one entry per CPU.
 * Returns 0 on success, -ENOMEM if the allocation failed.
 */
115 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel, int ncpus)
117 size_t priv_size = (sizeof(struct perf_stat) +
118 (ncpus * sizeof(struct cpu_counts)));
119 evsel->priv = zalloc(priv_size);
120 return evsel->priv == NULL ? -ENOMEM : 0;

/* Counterpart of the above: release evsel->priv (body not visible here). */
123 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
/*
 * Fold one new sample into the running mean/variance using Welford's
 * online algorithm; M2 accumulates the sum of squared deviations and is
 * later turned into a variance by stddev_stats().
 * NOTE(review): relies on stats->n having been incremented for this
 * sample before the division below — that increment is on a line not
 * visible in this view; confirm against the full file.
 */
129 static void update_stats(struct stats *stats, u64 val)
134 delta = val - stats->mean;
135 stats->mean += delta / stats->n;
136 stats->M2 += delta*(val - stats->mean);
/* Arithmetic mean of all samples folded in via update_stats(). */
139 static double avg_stats(struct stats *stats)
145 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
147 * (\Sum n_i^2) - ((\Sum n_i)^2)/n
148 * s^2 = -------------------------------
151 * http://en.wikipedia.org/wiki/Stddev
153 * The std dev of the mean is related to the std dev by:
/*
 * Standard deviation of the mean: Bessel-corrected sample variance
 * (n-1 divisor) divided by n, then square-rooted.  Callers must only
 * use this when more than one run was measured (n > 1), otherwise the
 * n-1 divisor below is zero.
 */
160 static double stddev_stats(struct stats *stats)
162 double variance = stats->M2 / (stats->n - 1);
163 double variance_mean = variance / stats->n;
165 return sqrt(variance_mean);
/*
 * Per-CPU runtime baselines, updated by the read_counter* functions and
 * used by the printout functions to derive M/sec, IPC and miss ratios.
 * Index 0 is used for the aggregated (non --no-aggr) case.
 */
168 struct stats runtime_nsecs_stats[MAX_NR_CPUS];
169 struct stats runtime_cycles_stats[MAX_NR_CPUS];
170 struct stats runtime_branches_stats[MAX_NR_CPUS];
171 struct stats walltime_nsecs_stats;
/* True when evsel is the PERF_TYPE_<t> / PERF_COUNT_<c> event. */
173 #define MATCH_EVENT(t, c, evsel) \
174 (evsel->attr.type == PERF_TYPE_##t && \
175 evsel->attr.config == PERF_COUNT_##c)
/* Shared error() format string for failed sys_perf_event_open() calls. */
177 #define ERR_PERF_OPEN \
178 "counter %d, sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information."
/*
 * Open the event's fds: one per CPU in system-wide mode, otherwise one
 * per monitored thread.  On EPERM/EACCES, *perm_err is presumably set
 * so the caller can print a permission hint (the assignment lines are
 * not visible in this view).  Appears to return the number of fds
 * successfully created, which cmd_stat compares against expectations.
 */
180 static int create_perf_stat_counter(struct perf_evsel *evsel, bool *perm_err)
182 struct perf_event_attr *attr = &evsel->attr;
/* with --scale, ask the kernel for enabled/running times as well */
187 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
188 PERF_FORMAT_TOTAL_TIME_RUNNING;
/* system-wide: one counter per CPU, attached to all tasks (pid == -1) */
193 for (cpu = 0; cpu < nr_cpus; cpu++) {
194 FD(evsel, cpu, 0) = sys_perf_event_open(attr,
195 -1, cpumap[cpu], -1, 0);
196 if (FD(evsel, cpu, 0) < 0) {
197 if (errno == EPERM || errno == EACCES)
199 error(ERR_PERF_OPEN, evsel->idx,
200 FD(evsel, cpu, 0), strerror(errno));
/* per-task mode: children inherit the counter unless -i was given */
206 attr->inherit = !no_inherit;
207 if (target_pid == -1 && target_tid == -1) {
/* we fork the workload ourselves: start counting only at exec time */
209 attr->enable_on_exec = 1;
211 for (thread = 0; thread < thread_num; thread++) {
212 FD(evsel, 0, thread) = sys_perf_event_open(attr,
213 all_tids[thread], -1, -1, 0);
214 if (FD(evsel, 0, thread) < 0) {
215 if (errno == EPERM || errno == EACCES)
217 error(ERR_PERF_OPEN, evsel->idx,
218 FD(evsel, 0, thread),
230 * Does the counter have nsecs as a unit?
232 static inline int nsec_counter(struct perf_evsel *counter)
/* only the cpu-clock and task-clock software events count nanoseconds */
234 if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
235 MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
242 * Read out the results of a single counter:
243 * aggregate counts across CPUs in system-wide mode
245 static void read_counter_aggr(struct perf_evsel *counter)
247 struct perf_stat *ps = counter->priv;
/* triple layout: [0]=value, [1]=time_enabled, [2]=time_running
 * (matches the read_format requested in create_perf_stat_counter) */
248 u64 count[3], single_count[3];
254 count[0] = count[1] = count[2] = 0;
/* sum the per-cpu/per-thread readings, closing each fd as we go */
257 for (cpu = 0; cpu < nr_cpus; cpu++) {
258 for (thread = 0; thread < thread_num; thread++) {
259 if (FD(counter, cpu, thread) < 0)
262 res = read(FD(counter, cpu, thread),
263 single_count, nv * sizeof(u64));
264 assert(res == nv * sizeof(u64));
266 close(FD(counter, cpu, thread));
267 FD(counter, cpu, thread) = -1;
269 count[0] += single_count[0];
271 count[1] += single_count[1];
272 count[2] += single_count[2];
/* counter was multiplexed (running < enabled): scale the value up by
 * enabled/running, rounding to nearest */
285 if (count[2] < count[1]) {
287 count[0] = (unsigned long long)
288 ((double)count[0] * count[1] / count[2] + 0.5);
/* feed value/enabled/running into the per-run noise statistics */
292 for (i = 0; i < 3; i++)
293 update_stats(&ps->res_stats[i], count[i]);
296 fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
297 count[0], count[1], count[2]);
301 * Save the full runtime - to allow normalization during printout:
303 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
304 update_stats(&runtime_nsecs_stats[0], count[0]);
305 if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
306 update_stats(&runtime_cycles_stats[0], count[0]);
307 if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
308 update_stats(&runtime_branches_stats[0], count[0]);
312 * Read out the results of a single counter:
313 * do not aggregate counts across CPUs in system-wide mode
315 static void read_counter(struct perf_evsel *counter)
/* NOTE(review): priv points at a struct perf_stat whose cpu_counts[]
 * is a *trailing* member (see perf_evsel__alloc_stat_priv), yet it is
 * cast directly to struct cpu_counts * here, while print_counter reads
 * ps->cpu_counts[cpu] through struct perf_stat *.  These two views are
 * offset by sizeof(struct perf_stat) — verify against the full file;
 * this looks like a genuine mismatch. */
317 struct cpu_counts *cpu_counts = counter->priv;
322 count[0] = count[1] = count[2] = 0;
/* one value/enabled/running triple per CPU; fds closed after reading */
326 for (cpu = 0; cpu < nr_cpus; cpu++) {
328 if (FD(counter, cpu, 0) < 0)
331 res = read(FD(counter, cpu, 0), count, nv * sizeof(u64));
333 assert(res == nv * sizeof(u64));
335 close(FD(counter, cpu, 0));
336 FD(counter, cpu, 0) = -1;
/* multiplexed: scale the value up by enabled/running, round to nearest */
341 } else if (count[2] < count[1]) {
342 count[0] = (unsigned long long)
343 ((double)count[0] * count[1] / count[2] + 0.5);
346 cpu_counts[cpu].val = count[0]; /* scaled count */
347 cpu_counts[cpu].ena = count[1];
348 cpu_counts[cpu].run = count[2];
/* record per-CPU runtime baselines for the ratio printouts */
350 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
351 update_stats(&runtime_nsecs_stats[cpu], count[0]);
352 if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
353 update_stats(&runtime_cycles_stats[cpu], count[0]);
354 if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
355 update_stats(&runtime_branches_stats[cpu], count[0]);
/*
 * Execute one measurement run: optionally fork+exec the workload,
 * create all counters, wait for completion (or for 'done' when only
 * attaching / counting system-wide), then read every counter back.
 * Parent and child synchronize over two pipes so that all counters are
 * created before the real exec happens.  Returns the child's exit
 * status as extracted by WEXITSTATUS.
 */
359 static int run_perf_stat(int argc __used, const char **argv)
361 unsigned long long t0, t1;
362 struct perf_evsel *counter;
365 int child_ready_pipe[2], go_pipe[2];
366 bool perm_err = false;
/* a non-empty command line means we fork the workload ourselves */
367 const bool forks = (argc > 0);
373 if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
374 perror("failed to create pipes");
379 if ((child_pid = fork()) < 0)
380 perror("failed to fork");
/* ---- child side ---- */
383 close(child_ready_pipe[0]);
/* go_pipe's read end must vanish on exec so the workload never sees it */
385 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
388 * Do a dummy execvp to get the PLT entry resolved,
389 * so we avoid the resolver overhead on the real
392 execvp("", (char **)argv);
395 * Tell the parent we're ready to go
397 close(child_ready_pipe[1]);
400 * Wait until the parent tells us to go.
402 if (read(go_pipe[0], &buf, 1) == -1)
403 perror("unable to read pipe");
405 execvp(argv[0], (char **)argv);
/* ---- parent side ---- */
411 if (target_tid == -1 && target_pid == -1 && !system_wide)
412 all_tids[0] = child_pid;
415 * Wait for the child to be ready to exec.
417 close(child_ready_pipe[1]);
419 if (read(child_ready_pipe[0], &buf, 1) == -1)
420 perror("unable to read pipe");
421 close(child_ready_pipe[0]);
424 list_for_each_entry(counter, &evsel_list, node)
425 ncreated += create_perf_stat_counter(counter, &perm_err);
/* some events failed to open: explain, kill the workload, bail out */
427 if (ncreated < nr_counters) {
429 error("You may not have permission to collect %sstats.\n"
430 "\t Consider tweaking"
431 " /proc/sys/kernel/perf_event_paranoid or running as root.",
432 system_wide ? "system-wide " : "");
433 die("Not all events could be opened.\n");
435 kill(child_pid, SIGTERM);
440 * Enable counters and exec the command:
/* no workload to wait for: count until a signal sets 'done' */
448 while(!done) sleep(1);
/* wall-clock of this run; t0/t1 are read on lines not visible here */
453 update_stats(&walltime_nsecs_stats, t1 - t0);
456 list_for_each_entry(counter, &evsel_list, node)
457 read_counter(counter);
459 list_for_each_entry(counter, &evsel_list, node)
460 read_counter_aggr(counter);
462 return WEXITSTATUS(status);
/*
 * Append the run-to-run noise of a counter as a relative stddev,
 * e.g. " ( +- 1.234% )".  Only meaningful with --repeat; the guard
 * for run_count is on a line not visible in this view.
 */
465 static void print_noise(struct perf_evsel *evsel, double avg)
467 struct perf_stat *ps;
473 fprintf(stderr, " ( +- %7.3f%% )",
474 100 * stddev_stats(&ps->res_stats[0]) / avg);
/*
 * Print a nanosecond-based counter (cpu-clock/task-clock) converted to
 * milliseconds.  cpu >= 0 selects per-CPU mode and prefixes the line
 * with "CPUnn"; cpu == -1 prints the aggregate.  For task-clock a
 * "CPUs utilized" ratio against wall-clock time is appended.
 */
477 static void nsec_printout(int cpu, struct perf_evsel *counter, double avg)
479 double msecs = avg / 1e6;
480 char cpustr[16] = { '\0', };
/* CSV mode gets unpadded fields; human mode gets aligned columns */
481 const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s";
484 sprintf(cpustr, "CPU%*d%s",
486 cpumap[cpu], csv_sep);
488 fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(counter));
493 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
494 fprintf(stderr, " # %10.3f CPUs ",
495 avg / avg_stats(&walltime_nsecs_stats));
/*
 * Print a raw event count plus a derived ratio: IPC for instructions,
 * miss percentage for branch-misses, and M/sec against task-clock for
 * everything else.  The %' format (thousands grouping, per setlocale
 * in cmd_stat) is used only when big_num is enabled.
 */
499 static void abs_printout(int cpu, struct perf_evsel *counter, double avg)
501 double total, ratio = 0.0;
502 char cpustr[16] = { '\0', };
/* with thousands' separators ... */
508 fmt = "%s%'18.0f%s%-24s";
/* ... or plain, when big_num is off or unsupported */
510 fmt = "%s%18.0f%s%-24s";
513 sprintf(cpustr, "CPU%*d%s",
515 cpumap[cpu], csv_sep);
519 fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(counter));
/* instructions: report instructions-per-cycle against the cycle count */
524 if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
525 total = avg_stats(&runtime_cycles_stats[cpu]);
530 fprintf(stderr, " # %10.3f IPC ", ratio);
/* branch misses: percentage of all branch instructions */
531 } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
532 runtime_branches_stats[cpu].n != 0) {
533 total = avg_stats(&runtime_branches_stats[cpu]);
536 ratio = avg * 100 / total;
538 fprintf(stderr, " # %10.3f %% ", ratio);
/* everything else: rate in millions per second of task-clock time */
540 } else if (runtime_nsecs_stats[cpu].n != 0) {
541 total = avg_stats(&runtime_nsecs_stats[cpu]);
544 ratio = 1000.0 * avg / total;
546 fprintf(stderr, " # %10.3f M/sec", ratio);
551 * Print out the results of a single counter:
552 * aggregated counts in system-wide mode
554 static void print_counter_aggr(struct perf_evsel *counter)
556 struct perf_stat *ps = counter->priv;
557 double avg = avg_stats(&ps->res_stats[0]);
558 int scaled = ps->scaled;
/* counter never ran: emit a "<not counted>" placeholder line */
561 fprintf(stderr, "%*s%s%-24s\n",
563 "<not counted>", csv_sep, event_name(counter));
/* time-based counters print msecs, the rest raw counts + ratio */
567 if (nsec_counter(counter))
568 nsec_printout(-1, counter, avg);
570 abs_printout(-1, counter, avg);
577 print_noise(counter, avg);
580 double avg_enabled, avg_running;
/* counter was multiplexed: show how much of the time it really ran */
582 avg_enabled = avg_stats(&ps->res_stats[1]);
583 avg_running = avg_stats(&ps->res_stats[2]);
585 fprintf(stderr, " (scaled from %.2f%%)",
586 100 * avg_running / avg_enabled);
589 fprintf(stderr, "\n");
593 * Print out the results of a single counter:
594 * does not use aggregated count in system-wide
596 static void print_counter(struct perf_evsel *counter)
598 struct perf_stat *ps = counter->priv;
/* one output line per CPU (--no-aggr mode) */
602 for (cpu = 0; cpu < nr_cpus; cpu++) {
603 val = ps->cpu_counts[cpu].val;
604 ena = ps->cpu_counts[cpu].ena;
605 run = ps->cpu_counts[cpu].run;
/* counter never ran on this CPU: placeholder line and move on */
606 if (run == 0 || ena == 0) {
607 fprintf(stderr, "CPU%*d%s%*s%s%-24s",
609 cpumap[cpu], csv_sep,
611 "<not counted>", csv_sep,
612 event_name(counter));
614 fprintf(stderr, "\n");
618 if (nsec_counter(counter))
619 nsec_printout(cpu, counter, val);
621 abs_printout(cpu, counter, val);
/* single per-CPU value, so no run-to-run noise: pass avg = 1.0 */
624 print_noise(counter, 1.0);
/* multiplexed on this CPU: show the running/enabled percentage */
627 fprintf(stderr, " (scaled from %.2f%%)",
631 fprintf(stderr, "\n");
/*
 * Print the final summary: a header naming the measured command, pid
 * or tid (plus the repeat count), one line per counter — per-CPU with
 * --no-aggr, aggregated otherwise — and the elapsed wall-clock time
 * with its run-to-run noise.
 */
635 static void print_stat(int argc, const char **argv)
637 struct perf_evsel *counter;
643 fprintf(stderr, "\n");
644 fprintf(stderr, " Performance counter stats for ");
/* header: quoted command line, or the pid/tid we attached to */
645 if(target_pid == -1 && target_tid == -1) {
646 fprintf(stderr, "\'%s", argv[0]);
647 for (i = 1; i < argc; i++)
648 fprintf(stderr, " %s", argv[i]);
649 } else if (target_pid != -1)
650 fprintf(stderr, "process id \'%d", target_pid);
652 fprintf(stderr, "thread id \'%d", target_tid);
654 fprintf(stderr, "\'");
656 fprintf(stderr, " (%d runs)", run_count);
657 fprintf(stderr, ":\n\n");
/* body: per-CPU lines with --no-aggr, aggregated lines otherwise */
661 list_for_each_entry(counter, &evsel_list, node)
662 print_counter(counter);
664 list_for_each_entry(counter, &evsel_list, node)
665 print_counter_aggr(counter);
/* footer: wall-clock time, with relative stddev when repeated */
669 fprintf(stderr, "\n");
670 fprintf(stderr, " %18.9f seconds time elapsed",
671 avg_stats(&walltime_nsecs_stats)/1e9);
673 fprintf(stderr, " ( +- %7.3f%% )",
674 100*stddev_stats(&walltime_nsecs_stats) /
675 avg_stats(&walltime_nsecs_stats));
677 fprintf(stderr, "\n\n");
/* signal deferred for re-raising at exit; -1 means none pending */
681 static volatile int signr = -1;
/*
 * SIGINT/SIGALRM/SIGABRT handler: perf stat itself ignores the signal
 * so it can still print results; presumably records it in signr for
 * sig_atexit() to re-raise (the body is not visible in this view).
 */
683 static void skip_signal(int signo)
/*
 * atexit hook: terminate the forked workload (if any), then restore the
 * default disposition for the deferred signal and re-raise it so the
 * shell sees the correct termination status.
 */
691 static void sig_atexit(void)
694 kill(child_pid, SIGTERM);
699 signal(signr, SIG_DFL);
700 kill(getpid(), signr);
703 static const char * const stat_usage[] = {
704 "perf stat [<options>] [<command>]",
/*
 * -B / --no-big-num callback: record the user's *explicit* choice in
 * big_num_opt (1 = -B, 0 = --no-big-num) so cmd_stat can reject -B
 * together with -x and otherwise derive the big_num default.
 */
708 static int stat__set_big_num(const struct option *opt __used,
709 const char *s __used, int unset)
711 big_num_opt = unset ? 0 : 1;
/*
 * Command-line options for 'perf stat'; parsed in cmd_stat() via
 * parse_options().  Each OPT_* entry pairs a flag with the file-scope
 * variable it fills in above.
 */
715 static const struct option options[] = {
716 OPT_CALLBACK('e', "event", NULL, "event",
717 "event selector. use 'perf list' to list available events",
719 OPT_BOOLEAN('i', "no-inherit", &no_inherit,
720 "child tasks do not inherit counters"),
721 OPT_INTEGER('p', "pid", &target_pid,
722 "stat events on existing process id"),
723 OPT_INTEGER('t', "tid", &target_tid,
724 "stat events on existing thread id"),
725 OPT_BOOLEAN('a', "all-cpus", &system_wide,
726 "system-wide collection from all CPUs"),
727 OPT_BOOLEAN('c', "scale", &scale,
728 "scale/normalize counters"),
729 OPT_INCR('v', "verbose", &verbose,
730 "be more verbose (show counter open errors, etc)"),
731 OPT_INTEGER('r', "repeat", &run_count,
732 "repeat command and print average + stddev (max: 100)"),
733 OPT_BOOLEAN('n', "null", &null_run,
734 "null run - dont start any counters"),
735 OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
736 "print large numbers with thousands\' separators",
738 OPT_STRING('C', "cpu", &cpu_list, "cpu",
739 "list of cpus to monitor in system-wide"),
740 OPT_BOOLEAN('A', "no-aggr", &no_aggr,
741 "disable CPU count aggregation"),
742 OPT_STRING('x', "field-separator", &csv_sep, "separator",
743 "print counts with custom separator"),
/*
 * Entry point for 'perf stat': parse options, validate the option
 * combinations, build the event list (defaults if none given), resolve
 * the target CPUs/threads, allocate per-event state, install signal
 * handling, run the measurement run_count times and print the summary.
 * NOTE(review): this view is truncated — the function continues past
 * the last visible line (cleanup/return).
 */
747 int cmd_stat(int argc, const char **argv, const char *prefix __used)
749 struct perf_evsel *pos;
750 int status = -ENOMEM;
/* honor the user's locale so the %' thousands grouping works */
752 setlocale(LC_ALL, "");
754 argc = parse_options(argc, argv, options, stat_usage,
755 PARSE_OPT_STOP_AT_NON_OPTION);
/* no -x: fall back to the default whitespace separator */
760 csv_sep = DEFAULT_SEPARATOR;
763 * let the spreadsheet do the pretty-printing
766 /* User explicitely passed -B? */
767 if (big_num_opt == 1) {
768 fprintf(stderr, "-B option not supported with -x\n");
769 usage_with_options(stat_usage, options);
770 } else /* Nope, so disable big number formatting */
772 } else if (big_num_opt == 0) /* User passed --no-big-num */
/* need either a command to run or a pid/tid to attach to */
775 if (!argc && target_pid == -1 && target_tid == -1)
776 usage_with_options(stat_usage, options);
778 usage_with_options(stat_usage, options);
780 /* no_aggr is for system-wide only */
781 if (no_aggr && !system_wide)
782 usage_with_options(stat_usage, options);
784 /* Set attrs and nr_counters if no event is selected and !null_run */
785 if (!null_run && !nr_counters) {
788 nr_counters = ARRAY_SIZE(default_attrs);
790 for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
791 pos = perf_evsel__new(default_attrs[c].type,
792 default_attrs[c].config,
796 list_add(&pos->node, &evsel_list);
/* resolve -C/-a into the set of CPUs to count on */
801 nr_cpus = read_cpu_map(cpu_list);
806 usage_with_options(stat_usage, options);
/* -p: expand the target pid into all of its threads */
808 if (target_pid != -1) {
809 target_tid = target_pid;
810 thread_num = find_all_tid(target_pid, &all_tids);
811 if (thread_num <= 0) {
812 fprintf(stderr, "Can't find all threads of pid %d\n",
814 usage_with_options(stat_usage, options);
/* single tid (or forked-workload) case: one-entry tid array */
817 all_tids=malloc(sizeof(pid_t));
821 all_tids[0] = target_tid;
/* per-event state: stat priv block and the fd matrix */
825 list_for_each_entry(pos, &evsel_list, node) {
826 if (perf_evsel__alloc_stat_priv(pos, nr_cpus) < 0 ||
827 perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0)
832 * We dont want to block the signals - that would cause
833 * child tasks to inherit that and Ctrl-C would not work.
834 * What we want is for Ctrl-C to work in the exec()-ed
835 * task, but being ignored by perf stat itself:
838 signal(SIGINT, skip_signal);
839 signal(SIGALRM, skip_signal);
840 signal(SIGABRT, skip_signal);
/* run the measurement --repeat times, accumulating noise stats */
843 for (run_idx = 0; run_idx < run_count; run_idx++) {
844 if (run_count != 1 && verbose)
845 fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
846 status = run_perf_stat(argc, argv);
850 print_stat(argc, argv);
/* teardown of per-event state (continues past the visible lines) */
852 list_for_each_entry(pos, &evsel_list, node) {
853 perf_evsel__free_fd(pos);
854 perf_evsel__free_stat_priv(pos);