]> git.karo-electronics.de Git - karo-tx-linux.git/blob - tools/perf/builtin-stat.c
perf tools: Introduce event selectors
[karo-tx-linux.git] / tools / perf / builtin-stat.c
1 /*
2  * builtin-stat.c
3  *
4  * Builtin stat command: Give a precise performance counters summary
5  * overview about any workload, CPU or specific PID.
6  *
7  * Sample output:
8
9    $ perf stat ~/hackbench 10
10    Time: 0.104
11
12     Performance counter stats for '/home/mingo/hackbench':
13
14        1255.538611  task clock ticks     #      10.143 CPU utilization factor
15              54011  context switches     #       0.043 M/sec
16                385  CPU migrations       #       0.000 M/sec
17              17755  pagefaults           #       0.014 M/sec
18         3808323185  CPU cycles           #    3033.219 M/sec
19         1575111190  instructions         #    1254.530 M/sec
20           17367895  cache references     #      13.833 M/sec
21            7674421  cache misses         #       6.112 M/sec
22
23     Wall-clock time elapsed:   123.786620 msecs
24
25  *
26  * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
27  *
28  * Improvements and fixes by:
29  *
30  *   Arjan van de Ven <arjan@linux.intel.com>
31  *   Yanmin Zhang <yanmin.zhang@intel.com>
32  *   Wu Fengguang <fengguang.wu@intel.com>
33  *   Mike Galbraith <efault@gmx.de>
34  *   Paul Mackerras <paulus@samba.org>
35  *   Jaswinder Singh Rajput <jaswinder@kernel.org>
36  *
37  * Released under the GPL v2. (and only v2, not any later version)
38  */
39
40 #include "perf.h"
41 #include "builtin.h"
42 #include "util/util.h"
43 #include "util/parse-options.h"
44 #include "util/parse-events.h"
45 #include "util/event.h"
46 #include "util/evsel.h"
47 #include "util/debug.h"
48 #include "util/header.h"
49 #include "util/cpumap.h"
50 #include "util/thread.h"
51
52 #include <sys/prctl.h>
53 #include <math.h>
54 #include <locale.h>
55
56 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
57
58 #define DEFAULT_SEPARATOR       " "
59
/*
 * Events counted when the user does not select any with -e and did not
 * pass -n: the basic software costs plus the core hardware
 * pipeline/branch/cache events.  Order here is the printout order.
 */
static struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK              },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES        },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS          },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS             },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES              },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS            },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS     },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES           },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES        },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES            },

};
75
/* Behaviour flags and targets, set up from the command line in cmd_stat(). */
static bool                     system_wide                     =  false;
static int                      nr_cpus                         =  0;
static int                      run_idx                         =  0;

static int                      run_count                       =  1;
static bool                     no_inherit                      = false;
static bool                     scale                           =  true;
static bool                     no_aggr                         = false;
static pid_t                    target_pid                      = -1;   /* -p */
static pid_t                    target_tid                      = -1;   /* -t */
static pid_t                    *all_tids                       =  NULL;
static int                      thread_num                      =  0;
static pid_t                    child_pid                       = -1;   /* forked workload */
static bool                     null_run                        =  false;
static bool                     big_num                         =  true;
static int                      big_num_opt                     =  -1;  /* -1: unset, 0/1: -B state */
static const char               *cpu_list;
static const char               *csv_sep                        = NULL;
static bool                     csv_output                      = false;

/* One counter reading on one CPU: raw value plus enabled/running times. */
struct cpu_counts {
        u64 val;
        u64 ena;
        u64 run;
};

/* Set by skip_signal() to end the !forks measurement loop in run_perf_stat(). */
static volatile int done = 0;

/* Running-mean/variance accumulators (Welford); see update_stats(). */
struct stats
{
        double n, mean, M2;
};

/* Per-event private state, hung off evsel->priv. */
struct perf_stat {
        struct stats      res_stats[3];   /* value/enabled/running across runs */
        int               scaled;         /* -1: never counted, 1: extrapolated */
        struct cpu_counts cpu_counts[];   /* per-CPU readings for no_aggr mode */
};
114
115 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel, int ncpus)
116 {
117         size_t priv_size = (sizeof(struct perf_stat) +
118                             (ncpus * sizeof(struct cpu_counts)));
119         evsel->priv = zalloc(priv_size);
120         return evsel->priv == NULL ? -ENOMEM : 0;
121 }
122
123 static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
124 {
125         free(evsel->priv);
126         evsel->priv = NULL;
127 }
128
129 static void update_stats(struct stats *stats, u64 val)
130 {
131         double delta;
132
133         stats->n++;
134         delta = val - stats->mean;
135         stats->mean += delta / stats->n;
136         stats->M2 += delta*(val - stats->mean);
137 }
138
/* Mean of all samples folded in so far via update_stats(). */
static double avg_stats(struct stats *stats)
{
        return stats->mean;
}
143
144 /*
145  * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
146  *
147  *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
148  * s^2 = -------------------------------
149  *                  n - 1
150  *
151  * http://en.wikipedia.org/wiki/Stddev
152  *
153  * The std dev of the mean is related to the std dev by:
154  *
155  *             s
156  * s_mean = -------
157  *          sqrt(n)
158  *
159  */
160 static double stddev_stats(struct stats *stats)
161 {
162         double variance = stats->M2 / (stats->n - 1);
163         double variance_mean = variance / stats->n;
164
165         return sqrt(variance_mean);
166 }
167
/* Per-CPU baselines used to derive printout ratios (M/sec, IPC, miss %). */
struct stats                    runtime_nsecs_stats[MAX_NR_CPUS];
struct stats                    runtime_cycles_stats[MAX_NR_CPUS];
struct stats                    runtime_branches_stats[MAX_NR_CPUS];
struct stats                    walltime_nsecs_stats;

/* Does @evsel count the PERF_TYPE_##t event PERF_COUNT_##c? */
#define MATCH_EVENT(t, c, evsel)                        \
        (evsel->attr.type == PERF_TYPE_##t &&   \
         evsel->attr.config == PERF_COUNT_##c)

/* Error format emitted when sys_perf_event_open() fails for a counter. */
#define ERR_PERF_OPEN \
"counter %d, sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information."
179
/*
 * Open one fd per CPU (system-wide) or one fd per thread (per-task mode)
 * for @evsel.  Open failures are reported via error(); *perm_err is set
 * when a failure looks like a permission problem (EPERM/EACCES).
 * Returns the number of fds successfully created.
 */
static int create_perf_stat_counter(struct perf_evsel *evsel, bool *perm_err)
{
        struct perf_event_attr *attr = &evsel->attr;
        int thread;
        int ncreated = 0;

        /* Ask for enabled/running times so counts can be extrapolated. */
        if (scale)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;

        if (system_wide) {
                int cpu;

                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        FD(evsel, cpu, 0) = sys_perf_event_open(attr,
                                        -1, cpumap[cpu], -1, 0);
                        if (FD(evsel, cpu, 0) < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, evsel->idx,
                                        FD(evsel, cpu, 0), strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        } else {
                attr->inherit = !no_inherit;
                /*
                 * When perf stat forks the workload itself, start the
                 * counters disabled and let them kick in on exec of the
                 * real command, so the fork/exec overhead is not counted.
                 */
                if (target_pid == -1 && target_tid == -1) {
                        attr->disabled = 1;
                        attr->enable_on_exec = 1;
                }
                for (thread = 0; thread < thread_num; thread++) {
                        FD(evsel, 0, thread) = sys_perf_event_open(attr,
                                all_tids[thread], -1, -1, 0);
                        if (FD(evsel, 0, thread) < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, evsel->idx,
                                        FD(evsel, 0, thread),
                                         strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        }

        return ncreated;
}
228
229 /*
230  * Does the counter have nsecs as a unit?
231  */
232 static inline int nsec_counter(struct perf_evsel *counter)
233 {
234         if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
235             MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
236                 return 1;
237
238         return 0;
239 }
240
241 /*
242  * Read out the results of a single counter:
243  * aggregate counts across CPUs in system-wide mode
244  */
245 static void read_counter_aggr(struct perf_evsel *counter)
246 {
247         struct perf_stat *ps = counter->priv;
248         u64 count[3], single_count[3];
249         int cpu;
250         size_t res, nv;
251         int scaled;
252         int i, thread;
253
254         count[0] = count[1] = count[2] = 0;
255
256         nv = scale ? 3 : 1;
257         for (cpu = 0; cpu < nr_cpus; cpu++) {
258                 for (thread = 0; thread < thread_num; thread++) {
259                         if (FD(counter, cpu, thread) < 0)
260                                 continue;
261
262                         res = read(FD(counter, cpu, thread),
263                                         single_count, nv * sizeof(u64));
264                         assert(res == nv * sizeof(u64));
265
266                         close(FD(counter, cpu, thread));
267                         FD(counter, cpu, thread) = -1;
268
269                         count[0] += single_count[0];
270                         if (scale) {
271                                 count[1] += single_count[1];
272                                 count[2] += single_count[2];
273                         }
274                 }
275         }
276
277         scaled = 0;
278         if (scale) {
279                 if (count[2] == 0) {
280                         ps->scaled = -1;
281                         count[0] = 0;
282                         return;
283                 }
284
285                 if (count[2] < count[1]) {
286                         ps->scaled = 1;
287                         count[0] = (unsigned long long)
288                                 ((double)count[0] * count[1] / count[2] + 0.5);
289                 }
290         }
291
292         for (i = 0; i < 3; i++)
293                 update_stats(&ps->res_stats[i], count[i]);
294
295         if (verbose) {
296                 fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
297                                 count[0], count[1], count[2]);
298         }
299
300         /*
301          * Save the full runtime - to allow normalization during printout:
302          */
303         if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
304                 update_stats(&runtime_nsecs_stats[0], count[0]);
305         if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
306                 update_stats(&runtime_cycles_stats[0], count[0]);
307         if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
308                 update_stats(&runtime_branches_stats[0], count[0]);
309 }
310
311 /*
312  * Read out the results of a single counter:
313  * do not aggregate counts across CPUs in system-wide mode
314  */
315 static void read_counter(struct perf_evsel *counter)
316 {
317         struct cpu_counts *cpu_counts = counter->priv;
318         u64 count[3];
319         int cpu;
320         size_t res, nv;
321
322         count[0] = count[1] = count[2] = 0;
323
324         nv = scale ? 3 : 1;
325
326         for (cpu = 0; cpu < nr_cpus; cpu++) {
327
328                 if (FD(counter, cpu, 0) < 0)
329                         continue;
330
331                 res = read(FD(counter, cpu, 0), count, nv * sizeof(u64));
332
333                 assert(res == nv * sizeof(u64));
334
335                 close(FD(counter, cpu, 0));
336                 FD(counter, cpu, 0) = -1;
337
338                 if (scale) {
339                         if (count[2] == 0) {
340                                 count[0] = 0;
341                         } else if (count[2] < count[1]) {
342                                 count[0] = (unsigned long long)
343                                 ((double)count[0] * count[1] / count[2] + 0.5);
344                         }
345                 }
346                 cpu_counts[cpu].val = count[0]; /* scaled count */
347                 cpu_counts[cpu].ena = count[1];
348                 cpu_counts[cpu].run = count[2];
349
350                 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
351                         update_stats(&runtime_nsecs_stats[cpu], count[0]);
352                 if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
353                         update_stats(&runtime_cycles_stats[cpu], count[0]);
354                 if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
355                         update_stats(&runtime_branches_stats[cpu], count[0]);
356         }
357 }
358
359 static int run_perf_stat(int argc __used, const char **argv)
360 {
361         unsigned long long t0, t1;
362         struct perf_evsel *counter;
363         int status = 0;
364         int ncreated = 0;
365         int child_ready_pipe[2], go_pipe[2];
366         bool perm_err = false;
367         const bool forks = (argc > 0);
368         char buf;
369
370         if (!system_wide)
371                 nr_cpus = 1;
372
373         if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
374                 perror("failed to create pipes");
375                 exit(1);
376         }
377
378         if (forks) {
379                 if ((child_pid = fork()) < 0)
380                         perror("failed to fork");
381
382                 if (!child_pid) {
383                         close(child_ready_pipe[0]);
384                         close(go_pipe[1]);
385                         fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
386
387                         /*
388                          * Do a dummy execvp to get the PLT entry resolved,
389                          * so we avoid the resolver overhead on the real
390                          * execvp call.
391                          */
392                         execvp("", (char **)argv);
393
394                         /*
395                          * Tell the parent we're ready to go
396                          */
397                         close(child_ready_pipe[1]);
398
399                         /*
400                          * Wait until the parent tells us to go.
401                          */
402                         if (read(go_pipe[0], &buf, 1) == -1)
403                                 perror("unable to read pipe");
404
405                         execvp(argv[0], (char **)argv);
406
407                         perror(argv[0]);
408                         exit(-1);
409                 }
410
411                 if (target_tid == -1 && target_pid == -1 && !system_wide)
412                         all_tids[0] = child_pid;
413
414                 /*
415                  * Wait for the child to be ready to exec.
416                  */
417                 close(child_ready_pipe[1]);
418                 close(go_pipe[0]);
419                 if (read(child_ready_pipe[0], &buf, 1) == -1)
420                         perror("unable to read pipe");
421                 close(child_ready_pipe[0]);
422         }
423
424         list_for_each_entry(counter, &evsel_list, node)
425                 ncreated += create_perf_stat_counter(counter, &perm_err);
426
427         if (ncreated < nr_counters) {
428                 if (perm_err)
429                         error("You may not have permission to collect %sstats.\n"
430                               "\t Consider tweaking"
431                               " /proc/sys/kernel/perf_event_paranoid or running as root.",
432                               system_wide ? "system-wide " : "");
433                 die("Not all events could be opened.\n");
434                 if (child_pid != -1)
435                         kill(child_pid, SIGTERM);
436                 return -1;
437         }
438
439         /*
440          * Enable counters and exec the command:
441          */
442         t0 = rdclock();
443
444         if (forks) {
445                 close(go_pipe[1]);
446                 wait(&status);
447         } else {
448                 while(!done) sleep(1);
449         }
450
451         t1 = rdclock();
452
453         update_stats(&walltime_nsecs_stats, t1 - t0);
454
455         if (no_aggr) {
456                 list_for_each_entry(counter, &evsel_list, node)
457                         read_counter(counter);
458         } else {
459                 list_for_each_entry(counter, &evsel_list, node)
460                         read_counter_aggr(counter);
461         }
462         return WEXITSTATUS(status);
463 }
464
465 static void print_noise(struct perf_evsel *evsel, double avg)
466 {
467         struct perf_stat *ps;
468
469         if (run_count == 1)
470                 return;
471
472         ps = evsel->priv;
473         fprintf(stderr, "   ( +- %7.3f%% )",
474                         100 * stddev_stats(&ps->res_stats[0]) / avg);
475 }
476
477 static void nsec_printout(int cpu, struct perf_evsel *counter, double avg)
478 {
479         double msecs = avg / 1e6;
480         char cpustr[16] = { '\0', };
481         const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-24s";
482
483         if (no_aggr)
484                 sprintf(cpustr, "CPU%*d%s",
485                         csv_output ? 0 : -4,
486                         cpumap[cpu], csv_sep);
487
488         fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(counter));
489
490         if (csv_output)
491                 return;
492
493         if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
494                 fprintf(stderr, " # %10.3f CPUs ",
495                                 avg / avg_stats(&walltime_nsecs_stats));
496         }
497 }
498
499 static void abs_printout(int cpu, struct perf_evsel *counter, double avg)
500 {
501         double total, ratio = 0.0;
502         char cpustr[16] = { '\0', };
503         const char *fmt;
504
505         if (csv_output)
506                 fmt = "%s%.0f%s%s";
507         else if (big_num)
508                 fmt = "%s%'18.0f%s%-24s";
509         else
510                 fmt = "%s%18.0f%s%-24s";
511
512         if (no_aggr)
513                 sprintf(cpustr, "CPU%*d%s",
514                         csv_output ? 0 : -4,
515                         cpumap[cpu], csv_sep);
516         else
517                 cpu = 0;
518
519         fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(counter));
520
521         if (csv_output)
522                 return;
523
524         if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
525                 total = avg_stats(&runtime_cycles_stats[cpu]);
526
527                 if (total)
528                         ratio = avg / total;
529
530                 fprintf(stderr, " # %10.3f IPC  ", ratio);
531         } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
532                         runtime_branches_stats[cpu].n != 0) {
533                 total = avg_stats(&runtime_branches_stats[cpu]);
534
535                 if (total)
536                         ratio = avg * 100 / total;
537
538                 fprintf(stderr, " # %10.3f %%    ", ratio);
539
540         } else if (runtime_nsecs_stats[cpu].n != 0) {
541                 total = avg_stats(&runtime_nsecs_stats[cpu]);
542
543                 if (total)
544                         ratio = 1000.0 * avg / total;
545
546                 fprintf(stderr, " # %10.3f M/sec", ratio);
547         }
548 }
549
550 /*
551  * Print out the results of a single counter:
552  * aggregated counts in system-wide mode
553  */
554 static void print_counter_aggr(struct perf_evsel *counter)
555 {
556         struct perf_stat *ps = counter->priv;
557         double avg = avg_stats(&ps->res_stats[0]);
558         int scaled = ps->scaled;
559
560         if (scaled == -1) {
561                 fprintf(stderr, "%*s%s%-24s\n",
562                         csv_output ? 0 : 18,
563                         "<not counted>", csv_sep, event_name(counter));
564                 return;
565         }
566
567         if (nsec_counter(counter))
568                 nsec_printout(-1, counter, avg);
569         else
570                 abs_printout(-1, counter, avg);
571
572         if (csv_output) {
573                 fputc('\n', stderr);
574                 return;
575         }
576
577         print_noise(counter, avg);
578
579         if (scaled) {
580                 double avg_enabled, avg_running;
581
582                 avg_enabled = avg_stats(&ps->res_stats[1]);
583                 avg_running = avg_stats(&ps->res_stats[2]);
584
585                 fprintf(stderr, "  (scaled from %.2f%%)",
586                                 100 * avg_running / avg_enabled);
587         }
588
589         fprintf(stderr, "\n");
590 }
591
592 /*
593  * Print out the results of a single counter:
594  * does not use aggregated count in system-wide
595  */
596 static void print_counter(struct perf_evsel *counter)
597 {
598         struct perf_stat *ps = counter->priv;
599         u64 ena, run, val;
600         int cpu;
601
602         for (cpu = 0; cpu < nr_cpus; cpu++) {
603                 val = ps->cpu_counts[cpu].val;
604                 ena = ps->cpu_counts[cpu].ena;
605                 run = ps->cpu_counts[cpu].run;
606                 if (run == 0 || ena == 0) {
607                         fprintf(stderr, "CPU%*d%s%*s%s%-24s",
608                                 csv_output ? 0 : -4,
609                                 cpumap[cpu], csv_sep,
610                                 csv_output ? 0 : 18,
611                                 "<not counted>", csv_sep,
612                                 event_name(counter));
613
614                         fprintf(stderr, "\n");
615                         continue;
616                 }
617
618                 if (nsec_counter(counter))
619                         nsec_printout(cpu, counter, val);
620                 else
621                         abs_printout(cpu, counter, val);
622
623                 if (!csv_output) {
624                         print_noise(counter, 1.0);
625
626                         if (run != ena) {
627                                 fprintf(stderr, "  (scaled from %.2f%%)",
628                                         100.0 * run / ena);
629                         }
630                 }
631                 fprintf(stderr, "\n");
632         }
633 }
634
635 static void print_stat(int argc, const char **argv)
636 {
637         struct perf_evsel *counter;
638         int i;
639
640         fflush(stdout);
641
642         if (!csv_output) {
643                 fprintf(stderr, "\n");
644                 fprintf(stderr, " Performance counter stats for ");
645                 if(target_pid == -1 && target_tid == -1) {
646                         fprintf(stderr, "\'%s", argv[0]);
647                         for (i = 1; i < argc; i++)
648                                 fprintf(stderr, " %s", argv[i]);
649                 } else if (target_pid != -1)
650                         fprintf(stderr, "process id \'%d", target_pid);
651                 else
652                         fprintf(stderr, "thread id \'%d", target_tid);
653
654                 fprintf(stderr, "\'");
655                 if (run_count > 1)
656                         fprintf(stderr, " (%d runs)", run_count);
657                 fprintf(stderr, ":\n\n");
658         }
659
660         if (no_aggr) {
661                 list_for_each_entry(counter, &evsel_list, node)
662                         print_counter(counter);
663         } else {
664                 list_for_each_entry(counter, &evsel_list, node)
665                         print_counter_aggr(counter);
666         }
667
668         if (!csv_output) {
669                 fprintf(stderr, "\n");
670                 fprintf(stderr, " %18.9f  seconds time elapsed",
671                                 avg_stats(&walltime_nsecs_stats)/1e9);
672                 if (run_count > 1) {
673                         fprintf(stderr, "   ( +- %7.3f%% )",
674                                 100*stddev_stats(&walltime_nsecs_stats) /
675                                 avg_stats(&walltime_nsecs_stats));
676                 }
677                 fprintf(stderr, "\n\n");
678         }
679 }
680
681 static volatile int signr = -1;
682
683 static void skip_signal(int signo)
684 {
685         if(child_pid == -1)
686                 done = 1;
687
688         signr = signo;
689 }
690
691 static void sig_atexit(void)
692 {
693         if (child_pid != -1)
694                 kill(child_pid, SIGTERM);
695
696         if (signr == -1)
697                 return;
698
699         signal(signr, SIG_DFL);
700         kill(getpid(), signr);
701 }
702
/* Usage lines shown by usage_with_options() on bad invocation. */
static const char * const stat_usage[] = {
        "perf stat [<options>] [<command>]",
        NULL
};
707
708 static int stat__set_big_num(const struct option *opt __used,
709                              const char *s __used, int unset)
710 {
711         big_num_opt = unset ? 0 : 1;
712         return 0;
713 }
714
/* Command-line options; listed order is also the --help display order. */
static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
        OPT_INTEGER('p', "pid", &target_pid,
                    "stat events on existing process id"),
        OPT_INTEGER('t', "tid", &target_tid,
                    "stat events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('c', "scale", &scale,
                    "scale/normalize counters"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &run_count,
                    "repeat command and print average + stddev (max: 100)"),
        OPT_BOOLEAN('n', "null", &null_run,
                    "null run - dont start any counters"),
        OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 
                           "print large numbers with thousands\' separators",
                           stat__set_big_num),
        OPT_STRING('C', "cpu", &cpu_list, "cpu",
                    "list of cpus to monitor in system-wide"),
        OPT_BOOLEAN('A', "no-aggr", &no_aggr,
                    "disable CPU count aggregation"),
        OPT_STRING('x', "field-separator", &csv_sep, "separator",
                   "print counts with custom separator"),
        OPT_END()
};
746
/*
 * Entry point for 'perf stat': parse and validate options, build the
 * event list (falling back to default_attrs), resolve target threads,
 * allocate per-event state, run the workload run_count times and print
 * the aggregated statistics.  Returns the last run's exit status.
 */
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
        struct perf_evsel *pos;
        int status = -ENOMEM;

        /* Honor the user's locale so %'d thousands grouping works. */
        setlocale(LC_ALL, "");

        argc = parse_options(argc, argv, options, stat_usage,
                PARSE_OPT_STOP_AT_NON_OPTION);

        if (csv_sep)
                csv_output = true;
        else
                csv_sep = DEFAULT_SEPARATOR;

        /*
         * let the spreadsheet do the pretty-printing
         */
        if (csv_output) {
                /* User explicitely passed -B? */
                if (big_num_opt == 1) {
                        fprintf(stderr, "-B option not supported with -x\n");
                        usage_with_options(stat_usage, options);
                } else /* Nope, so disable big number formatting */
                        big_num = false;
        } else if (big_num_opt == 0) /* User passed --no-big-num */
                big_num = false;

        /* Need either a command to run or an existing pid/tid to attach. */
        if (!argc && target_pid == -1 && target_tid == -1)
                usage_with_options(stat_usage, options);
        if (run_count <= 0)
                usage_with_options(stat_usage, options);

        /* no_aggr is for system-wide only */
        if (no_aggr && !system_wide)
                usage_with_options(stat_usage, options);

        /* Set attrs and nr_counters if no event is selected and !null_run */
        if (!null_run && !nr_counters) {
                size_t c;

                nr_counters = ARRAY_SIZE(default_attrs);

                for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
                        pos = perf_evsel__new(default_attrs[c].type,
                                              default_attrs[c].config,
                                              nr_counters);
                        if (pos == NULL)
                                goto out;
                        list_add(&pos->node, &evsel_list);
                }
        }

        if (system_wide)
                nr_cpus = read_cpu_map(cpu_list);
        else
                nr_cpus = 1;

        if (nr_cpus < 1)
                usage_with_options(stat_usage, options);

        if (target_pid != -1) {
                /* -p: monitor every thread of the given process. */
                target_tid = target_pid;
                thread_num = find_all_tid(target_pid, &all_tids);
                if (thread_num <= 0) {
                        fprintf(stderr, "Can't find all threads of pid %d\n",
                                        target_pid);
                        usage_with_options(stat_usage, options);
                }
        } else {
                /* Single target: -t <tid>, or -1 for the forked workload. */
                all_tids=malloc(sizeof(pid_t));
                if (!all_tids)
                        return -ENOMEM;

                all_tids[0] = target_tid;
                thread_num = 1;
        }

        /* Per-event stat area plus one fd slot per CPU x thread. */
        list_for_each_entry(pos, &evsel_list, node) {
                if (perf_evsel__alloc_stat_priv(pos, nr_cpus) < 0 ||
                    perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0)
                        goto out_free_fd;
        }

        /*
         * We dont want to block the signals - that would cause
         * child tasks to inherit that and Ctrl-C would not work.
         * What we want is for Ctrl-C to work in the exec()-ed
         * task, but being ignored by perf stat itself:
         */
        atexit(sig_atexit);
        signal(SIGINT,  skip_signal);
        signal(SIGALRM, skip_signal);
        signal(SIGABRT, skip_signal);

        status = 0;
        for (run_idx = 0; run_idx < run_count; run_idx++) {
                if (run_count != 1 && verbose)
                        fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
                status = run_perf_stat(argc, argv);
        }

        if (status != -1)
                print_stat(argc, argv);
out_free_fd:
        list_for_each_entry(pos, &evsel_list, node) {
                perf_evsel__free_fd(pos);
                perf_evsel__free_stat_priv(pos);
        }
out:
        return status;
}