perf report: Bail out if there are unrecognized options/arguments
1 /*
2  * builtin-report.c
3  *
4  * Builtin report command: Analyze the perf.data input file,
5  * look up and read DSOs and symbol information and display
6  * a histogram of results, along various sorting keys.
7  */
8 #include "builtin.h"
9
10 #include "util/util.h"
11
12 #include "util/color.h"
13 #include "util/list.h"
14 #include "util/cache.h"
15 #include "util/rbtree.h"
16 #include "util/symbol.h"
17 #include "util/string.h"
18
19 #include "perf.h"
20
21 #include "util/parse-options.h"
22 #include "util/parse-events.h"
23
24 #define SHOW_KERNEL     1
25 #define SHOW_USER       2
26 #define SHOW_HV         4
27
28 static char             const *input_name = "perf.data";
29 static char             *vmlinux = NULL;
30
31 static char             default_sort_order[] = "comm,dso";
32 static char             *sort_order = default_sort_order;
33
34 static int              input;
35 static int              show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
36
37 static int              dump_trace = 0;
38 #define dprintf(x...)   do { if (dump_trace) printf(x); } while (0)
39
40 static int              verbose;
41 static int              full_paths;
42
43 static unsigned long    page_size;
44 static unsigned long    mmap_window = 32;
45
46 const char *perf_event_names[] = {
47         [PERF_EVENT_MMAP]   = " PERF_EVENT_MMAP",
48         [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
49         [PERF_EVENT_COMM]   = " PERF_EVENT_COMM",
50 };
51
52 struct ip_event {
53         struct perf_event_header header;
54         __u64 ip;
55         __u32 pid, tid;
56 };
57
58 struct mmap_event {
59         struct perf_event_header header;
60         __u32 pid, tid;
61         __u64 start;
62         __u64 len;
63         __u64 pgoff;
64         char filename[PATH_MAX];
65 };
66
67 struct comm_event {
68         struct perf_event_header header;
69         __u32 pid, tid;
70         char comm[16];
71 };
72
73 typedef union event_union {
74         struct perf_event_header header;
75         struct ip_event ip;
76         struct mmap_event mmap;
77         struct comm_event comm;
78 } event_t;
79
80 static LIST_HEAD(dsos);
81 static struct dso *kernel_dso;
82
83 static void dsos__add(struct dso *dso)
84 {
85         list_add_tail(&dso->node, &dsos);
86 }
87
88 static struct dso *dsos__find(const char *name)
89 {
90         struct dso *pos;
91
92         list_for_each_entry(pos, &dsos, node)
93                 if (strcmp(pos->name, name) == 0)
94                         return pos;
95         return NULL;
96 }
97
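/*
 * Return the DSO with the given name if we have seen it before, otherwise
 * create it, load its symbols and add it to the global list of DSOs.
 */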
98 static struct dso *dsos__findnew(const char *name)
99 {
100         struct dso *dso = dsos__find(name);
101         int nr;
102
103         if (dso)
104                 return dso;
105
106         dso = dso__new(name, 0);
107         if (!dso)
108                 goto out_delete_dso;
109
110         nr = dso__load(dso, NULL, verbose);
111         if (nr < 0) {
112                 if (verbose)
113                         fprintf(stderr, "Failed to open: %s\n", name);
114                 goto out_delete_dso;
115         }
116         if (!nr && verbose) {
117                 fprintf(stderr,
118                 "No symbols found in: %s, maybe install a debug package?\n",
119                                 name);
120         }
121
122         dsos__add(dso);
123
124         return dso;
125
126 out_delete_dso:
127         dso__delete(dso);
128         return NULL;
129 }
130
131 static void dsos__fprintf(FILE *fp)
132 {
133         struct dso *pos;
134
135         list_for_each_entry(pos, &dsos, node)
136                 dso__fprintf(pos, fp);
137 }
138
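/*
 * Create the "[kernel]" DSO and load the kernel symbol table, using the
 * --vmlinux image when one was given.
 */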
139 static int load_kernel(void)
140 {
141         int err;
142
143         kernel_dso = dso__new("[kernel]", 0);
144         if (!kernel_dso)
145                 return -1;
146
147         err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
148         if (err) {
149                 dso__delete(kernel_dso);
150                 kernel_dso = NULL;
151         } else
152                 dsos__add(kernel_dso);
153
154         return err;
155 }
156
157 static char __cwd[PATH_MAX];
158 static char *cwd = __cwd;
159 static int cwdlen;
160
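/*
 * Return how many leading characters 'pathname' has in common with the
 * current working directory; used below to shorten map filenames.
 */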
161 static int strcommon(const char *pathname)
162 {
163         int n = 0;
164
165         while (pathname[n] == cwd[n] && n < cwdlen)
166                 ++n;
167
168         return n;
169 }
170
171 struct map {
172         struct list_head node;
173         uint64_t         start;
174         uint64_t         end;
175         uint64_t         pgoff;
176         struct dso       *dso;
177 };
178
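/*
 * Build a map from an mmap event. Unless --full-paths was given, a filename
 * under the current working directory is shortened to a "./" relative path
 * before the DSO lookup.
 */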
179 static struct map *map__new(struct mmap_event *event)
180 {
181         struct map *self = malloc(sizeof(*self));
182
183         if (self != NULL) {
184                 const char *filename = event->filename;
185                 char newfilename[PATH_MAX];
186
187                 if (cwd) {
188                         int n = strcommon(filename);
189
190                         if (n == cwdlen) {
191                                 snprintf(newfilename, sizeof(newfilename),
192                                          ".%s", filename + n);
193                                 filename = newfilename;
194                         }
195                 }
196
197                 self->start = event->start;
198                 self->end   = event->start + event->len;
199                 self->pgoff = event->pgoff;
200
201                 self->dso = dsos__findnew(filename);
202                 if (self->dso == NULL)
203                         goto out_delete;
204         }
205         return self;
206 out_delete:
207         free(self);
208         return NULL;
209 }
210
211 struct thread;
212
213 struct thread {
214         struct rb_node   rb_node;
215         struct list_head maps;
216         pid_t            pid;
217         char             *comm;
218 };
219
220 static struct thread *thread__new(pid_t pid)
221 {
222         struct thread *self = malloc(sizeof(*self));
223
224         if (self != NULL) {
225                 self->pid = pid;
226                 self->comm = malloc(32);
227                 if (self->comm)
228                         snprintf(self->comm, 32, ":%d", self->pid);
229                 INIT_LIST_HEAD(&self->maps);
230         }
231
232         return self;
233 }
234
235 static int thread__set_comm(struct thread *self, const char *comm)
236 {
237         if (self->comm)
238                 free(self->comm);
239         self->comm = strdup(comm);
240         return self->comm ? 0 : -ENOMEM;
241 }
242
243 static struct rb_root threads;
244 static struct thread *last_match;
245
246 static struct thread *threads__findnew(pid_t pid)
247 {
248         struct rb_node **p = &threads.rb_node;
249         struct rb_node *parent = NULL;
250         struct thread *th;
251
252         /*
253          * Front-end cache - PID lookups come in blocks,
254          * so most of the time we don't have to look up
255          * the full rbtree:
256          */
257         if (last_match && last_match->pid == pid)
258                 return last_match;
259
260         while (*p != NULL) {
261                 parent = *p;
262                 th = rb_entry(parent, struct thread, rb_node);
263
264                 if (th->pid == pid) {
265                         last_match = th;
266                         return th;
267                 }
268
269                 if (pid < th->pid)
270                         p = &(*p)->rb_left;
271                 else
272                         p = &(*p)->rb_right;
273         }
274
275         th = thread__new(pid);
276         if (th != NULL) {
277                 rb_link_node(&th->rb_node, parent, p);
278                 rb_insert_color(&th->rb_node, &threads);
279                 last_match = th;
280         }
281
282         return th;
283 }
284
285 static void thread__insert_map(struct thread *self, struct map *map)
286 {
287         list_add_tail(&map->node, &self->maps);
288 }
289
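/* Find the map in this thread's address space that covers 'ip'. */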
290 static struct map *thread__find_map(struct thread *self, uint64_t ip)
291 {
292         struct map *pos;
293
294         if (self == NULL)
295                 return NULL;
296
297         list_for_each_entry(pos, &self->maps, node)
298                 if (ip >= pos->start && ip <= pos->end)
299                         return pos;
300
301         return NULL;
302 }
303
304 /*
305  * histogram, sorted on item, collects counts
306  */
307
308 static struct rb_root hist;
309
310 struct hist_entry {
311         struct rb_node   rb_node;
312
313         struct thread    *thread;
314         struct map       *map;
315         struct dso       *dso;
316         struct symbol    *sym;
317         uint64_t         ip;
318         char             level;
319
320         uint32_t         count;
321 };
322
323 /*
324  * configurable sorting bits
325  */
326
327 struct sort_entry {
328         struct list_head list;
329
330         char *header;
331
332         int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
333         int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
334         size_t  (*print)(FILE *fp, struct hist_entry *);
335 };
336
337 /* --sort pid */
338
339 static int64_t
340 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
341 {
342         return right->thread->pid - left->thread->pid;
343 }
344
345 static size_t
346 sort__thread_print(FILE *fp, struct hist_entry *self)
347 {
348         return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
349 }
350
351 static struct sort_entry sort_thread = {
352         .header = "         Command:  Pid",
353         .cmp    = sort__thread_cmp,
354         .print  = sort__thread_print,
355 };
356
357 /* --sort comm */
358
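/*
 * The first pass orders comm entries by pid; entries with the same comm
 * string are merged later by the collapse pass (sort__comm_collapse).
 */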
359 static int64_t
360 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
361 {
362         return right->thread->pid - left->thread->pid;
363 }
364
365 static int64_t
366 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
367 {
368         char *comm_l = left->thread->comm;
369         char *comm_r = right->thread->comm;
370
371         if (!comm_l || !comm_r) {
372                 if (!comm_l && !comm_r)
373                         return 0;
374                 else if (!comm_l)
375                         return -1;
376                 else
377                         return 1;
378         }
379
380         return strcmp(comm_l, comm_r);
381 }
382
383 static size_t
384 sort__comm_print(FILE *fp, struct hist_entry *self)
385 {
386         return fprintf(fp, "%16s", self->thread->comm);
387 }
388
389 static struct sort_entry sort_comm = {
390         .header         = "         Command",
391         .cmp            = sort__comm_cmp,
392         .collapse       = sort__comm_collapse,
393         .print          = sort__comm_print,
394 };
395
396 /* --sort dso */
397
398 static int64_t
399 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
400 {
401         struct dso *dso_l = left->dso;
402         struct dso *dso_r = right->dso;
403
404         if (!dso_l || !dso_r) {
405                 if (!dso_l && !dso_r)
406                         return 0;
407                 else if (!dso_l)
408                         return -1;
409                 else
410                         return 1;
411         }
412
413         return strcmp(dso_l->name, dso_r->name);
414 }
415
416 static size_t
417 sort__dso_print(FILE *fp, struct hist_entry *self)
418 {
419         if (self->dso)
420                 return fprintf(fp, "%-25s", self->dso->name);
421
422         return fprintf(fp, "%016llx         ", (__u64)self->ip);
423 }
424
425 static struct sort_entry sort_dso = {
426         .header = "Shared Object            ",
427         .cmp    = sort__dso_cmp,
428         .print  = sort__dso_print,
429 };
430
431 /* --sort symbol */
432
433 static int64_t
434 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
435 {
436         uint64_t ip_l, ip_r;
437
438         if (left->sym == right->sym)
439                 return 0;
440
441         ip_l = left->sym ? left->sym->start : left->ip;
442         ip_r = right->sym ? right->sym->start : right->ip;
443
444         return (int64_t)(ip_r - ip_l);
445 }
446
447 static size_t
448 sort__sym_print(FILE *fp, struct hist_entry *self)
449 {
450         size_t ret = 0;
451
452         if (verbose)
453                 ret += fprintf(fp, "%#018llx  ", (__u64)self->ip);
454
455         if (self->sym)
456                 ret += fprintf(fp, "%s", self->sym->name);
457         else
458                 ret += fprintf(fp, "%#016llx", (__u64)self->ip);
459
460         return ret;
461 }
462
463 static struct sort_entry sort_sym = {
464         .header = "Symbol",
465         .cmp    = sort__sym_cmp,
466         .print  = sort__sym_print,
467 };
468
469 static int sort__need_collapse = 0;
470
471 struct sort_dimension {
472         char *name;
473         struct sort_entry *entry;
474         int taken;
475 };
476
477 static struct sort_dimension sort_dimensions[] = {
478         { .name = "pid",        .entry = &sort_thread,  },
479         { .name = "comm",       .entry = &sort_comm,    },
480         { .name = "dso",        .entry = &sort_dso,     },
481         { .name = "symbol",     .entry = &sort_sym,     },
482 };
483
484 static LIST_HEAD(hist_entry__sort_list);
485
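/*
 * Resolve a --sort token: a case-insensitive prefix match against the known
 * dimensions, each of which may only be added once.
 */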
486 static int sort_dimension__add(char *tok)
487 {
488         int i;
489
490         for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
491                 struct sort_dimension *sd = &sort_dimensions[i];
492
493                 if (sd->taken)
494                         continue;
495
496                 if (strncasecmp(tok, sd->name, strlen(tok)))
497                         continue;
498
499                 if (sd->entry->collapse)
500                         sort__need_collapse = 1;
501
502                 list_add_tail(&sd->entry->list, &hist_entry__sort_list);
503                 sd->taken = 1;
504
505                 return 0;
506         }
507
508         return -ESRCH;
509 }
510
511 static int64_t
512 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
513 {
514         struct sort_entry *se;
515         int64_t cmp = 0;
516
517         list_for_each_entry(se, &hist_entry__sort_list, list) {
518                 cmp = se->cmp(left, right);
519                 if (cmp)
520                         break;
521         }
522
523         return cmp;
524 }
525
526 static int64_t
527 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
528 {
529         struct sort_entry *se;
530         int64_t cmp = 0;
531
532         list_for_each_entry(se, &hist_entry__sort_list, list) {
533                 int64_t (*f)(struct hist_entry *, struct hist_entry *);
534
535                 f = se->collapse ?: se->cmp;
536
537                 cmp = f(left, right);
538                 if (cmp)
539                         break;
540         }
541
542         return cmp;
543 }
544
545 static size_t
546 hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
547 {
548         struct sort_entry *se;
549         size_t ret;
550
551         if (total_samples) {
552                 double percent = self->count * 100.0 / total_samples;
553                 char *color = PERF_COLOR_NORMAL;
554
555                 /*
556                  * We color high-overhead entries in red, low-overhead
557                  * entries in green - and keep the middle ground normal:
558                  */
559                 if (percent >= 5.0)
560                         color = PERF_COLOR_RED;
561                 if (percent < 0.5)
562                         color = PERF_COLOR_GREEN;
563
564                 ret = color_fprintf(fp, color, "   %6.2f%%",
565                                 percent);
566         } else
567                 ret = fprintf(fp, "%12d ", self->count);
568
569         list_for_each_entry(se, &hist_entry__sort_list, list) {
570                 fprintf(fp, "  ");
571                 ret += se->print(fp, self);
572         }
573
574         ret += fprintf(fp, "\n");
575
576         return ret;
577 }
578
579 /*
580  * collect histogram counts
581  */
582
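/*
 * Insert the sample into the histogram rbtree; if an entry that compares
 * equal under the active sort keys already exists, just bump its count.
 */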
583 static int
584 hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
585                 struct symbol *sym, uint64_t ip, char level)
586 {
587         struct rb_node **p = &hist.rb_node;
588         struct rb_node *parent = NULL;
589         struct hist_entry *he;
590         struct hist_entry entry = {
591                 .thread = thread,
592                 .map    = map,
593                 .dso    = dso,
594                 .sym    = sym,
595                 .ip     = ip,
596                 .level  = level,
597                 .count  = 1,
598         };
599         int cmp;
600
601         while (*p != NULL) {
602                 parent = *p;
603                 he = rb_entry(parent, struct hist_entry, rb_node);
604
605                 cmp = hist_entry__cmp(&entry, he);
606
607                 if (!cmp) {
608                         he->count++;
609                         return 0;
610                 }
611
612                 if (cmp < 0)
613                         p = &(*p)->rb_left;
614                 else
615                         p = &(*p)->rb_right;
616         }
617
618         he = malloc(sizeof(*he));
619         if (!he)
620                 return -ENOMEM;
621         *he = entry;
622         rb_link_node(&he->rb_node, parent, p);
623         rb_insert_color(&he->rb_node, &hist);
624
625         return 0;
626 }
627
628 static void hist_entry__free(struct hist_entry *he)
629 {
630         free(he);
631 }
632
633 /*
634  * collapse the histogram
635  */
636
637 static struct rb_root collapse_hists;
638
639 static void collapse__insert_entry(struct hist_entry *he)
640 {
641         struct rb_node **p = &collapse_hists.rb_node;
642         struct rb_node *parent = NULL;
643         struct hist_entry *iter;
644         int64_t cmp;
645
646         while (*p != NULL) {
647                 parent = *p;
648                 iter = rb_entry(parent, struct hist_entry, rb_node);
649
650                 cmp = hist_entry__collapse(iter, he);
651
652                 if (!cmp) {
653                         iter->count += he->count;
654                         hist_entry__free(he);
655                         return;
656                 }
657
658                 if (cmp < 0)
659                         p = &(*p)->rb_left;
660                 else
661                         p = &(*p)->rb_right;
662         }
663
664         rb_link_node(&he->rb_node, parent, p);
665         rb_insert_color(&he->rb_node, &collapse_hists);
666 }
667
668 static void collapse__resort(void)
669 {
670         struct rb_node *next;
671         struct hist_entry *n;
672
673         if (!sort__need_collapse)
674                 return;
675
676         next = rb_first(&hist);
677         while (next) {
678                 n = rb_entry(next, struct hist_entry, rb_node);
679                 next = rb_next(&n->rb_node);
680
681                 rb_erase(&n->rb_node, &hist);
682                 collapse__insert_entry(n);
683         }
684 }
685
686 /*
687  * reverse the map, sort on count.
688  */
689
690 static struct rb_root output_hists;
691
692 static void output__insert_entry(struct hist_entry *he)
693 {
694         struct rb_node **p = &output_hists.rb_node;
695         struct rb_node *parent = NULL;
696         struct hist_entry *iter;
697
698         while (*p != NULL) {
699                 parent = *p;
700                 iter = rb_entry(parent, struct hist_entry, rb_node);
701
702                 if (he->count > iter->count)
703                         p = &(*p)->rb_left;
704                 else
705                         p = &(*p)->rb_right;
706         }
707
708         rb_link_node(&he->rb_node, parent, p);
709         rb_insert_color(&he->rb_node, &output_hists);
710 }
711
712 static void output__resort(void)
713 {
714         struct rb_node *next;
715         struct hist_entry *n;
716         struct rb_root *tree = &hist;
717
718         if (sort__need_collapse)
719                 tree = &collapse_hists;
720
721         next = rb_first(tree);
722
723         while (next) {
724                 n = rb_entry(next, struct hist_entry, rb_node);
725                 next = rb_next(&n->rb_node);
726
727                 rb_erase(&n->rb_node, tree);
728                 output__insert_entry(n);
729         }
730 }
731
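/*
 * Print the sorted histogram: a header built from the active sort entries,
 * then one line per entry with its overhead percentage.
 */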
732 static size_t output__fprintf(FILE *fp, uint64_t total_samples)
733 {
734         struct hist_entry *pos;
735         struct sort_entry *se;
736         struct rb_node *nd;
737         size_t ret = 0;
738
739         fprintf(fp, "\n");
740         fprintf(fp, "#\n");
741         fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples);
742         fprintf(fp, "#\n");
743
744         fprintf(fp, "# Overhead");
745         list_for_each_entry(se, &hist_entry__sort_list, list)
746                 fprintf(fp, "  %s", se->header);
747         fprintf(fp, "\n");
748
749         fprintf(fp, "# ........");
750         list_for_each_entry(se, &hist_entry__sort_list, list) {
751                 int i;
752
753                 fprintf(fp, "  ");
754                 for (i = 0; i < strlen(se->header); i++)
755                         fprintf(fp, ".");
756         }
757         fprintf(fp, "\n");
758
759         fprintf(fp, "#\n");
760
761         for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
762                 pos = rb_entry(nd, struct hist_entry, rb_node);
763                 ret += hist_entry__fprintf(fp, pos, total_samples);
764         }
765
766         if (!strcmp(sort_order, default_sort_order)) {
767                 fprintf(fp, "#\n");
768                 fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
769                 fprintf(fp, "#\n");
770         }
771         fprintf(fp, "\n");
772
773         return ret;
774 }
775
776 static void register_idle_thread(void)
777 {
778         struct thread *thread = threads__findnew(0);
779
780         if (thread == NULL ||
781                         thread__set_comm(thread, "[idle]")) {
782                 fprintf(stderr, "problem inserting idle task.\n");
783                 exit(-1);
784         }
785 }
786
787 static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;
788
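/*
 * Resolve a sample to its thread, map, DSO and symbol, then add it to the
 * histogram, honouring the kernel/user/hypervisor show mask.
 */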
789 static int
790 process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
791 {
792         char level;
793         int show = 0;
794         struct dso *dso = NULL;
795         struct thread *thread = threads__findnew(event->ip.pid);
796         uint64_t ip = event->ip.ip;
797         struct map *map = NULL;
798
799         dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
800                 (void *)(offset + head),
801                 (void *)(long)(event->header.size),
802                 event->header.misc,
803                 event->ip.pid,
804                 (void *)(long)ip);
805
806         if (thread == NULL) {
807                 fprintf(stderr, "problem processing %d event, skipping it.\n",
808                         event->header.type);
809                 return -1;
810         }
811
812         dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);
813
814         if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
815                 show = SHOW_KERNEL;
816                 level = 'k';
817
818                 dso = kernel_dso;
819
820                 dprintf(" ...... dso: %s\n", dso->name);
821
822         } else if (event->header.misc & PERF_EVENT_MISC_USER) {
823
824                 show = SHOW_USER;
825                 level = '.';
826
827                 map = thread__find_map(thread, ip);
828                 if (map != NULL) {
829                         dso = map->dso;
830                         ip -= map->start + map->pgoff;
831                 } else {
832                         /*
833                          * If this is outside of all known maps,
834                          * and is a negative address, try to look it
835                          * up in the kernel dso, as it might be a
836                          * vsyscall (which executes in user-mode):
837                          */
838                         if ((long long)ip < 0)
839                                 dso = kernel_dso;
840                 }
841                 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
842
843         } else {
844                 show = SHOW_HV;
845                 level = 'H';
846                 dprintf(" ...... dso: [hypervisor]\n");
847         }
848
849         if (show & show_mask) {
850                 struct symbol *sym = dso__find_symbol(dso, ip);
851
852                 if (hist_entry__add(thread, map, dso, sym, ip, level)) {
853                         fprintf(stderr,
854                 "problem incrementing symbol count, skipping event\n");
855                         return -1;
856                 }
857         }
858         total++;
859
860         return 0;
861 }
862
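/*
 * Record a new mapping for the thread, so that later samples falling into
 * this range can be resolved against the right DSO.
 */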
863 static int
864 process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
865 {
866         struct thread *thread = threads__findnew(event->mmap.pid);
867         struct map *map = map__new(&event->mmap);
868
869         dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
870                 (void *)(offset + head),
871                 (void *)(long)(event->header.size),
872                 (void *)(long)event->mmap.start,
873                 (void *)(long)event->mmap.len,
874                 (void *)(long)event->mmap.pgoff,
875                 event->mmap.filename);
876
877         if (thread == NULL || map == NULL) {
878                 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
879                 return 0;
880         }
881
882         thread__insert_map(thread, map);
883         total_mmap++;
884
885         return 0;
886 }
887
888 static int
889 process_comm_event(event_t *event, unsigned long offset, unsigned long head)
890 {
891         struct thread *thread = threads__findnew(event->comm.pid);
892
893         dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
894                 (void *)(offset + head),
895                 (void *)(long)(event->header.size),
896                 event->comm.comm, event->comm.pid);
897
898         if (thread == NULL ||
899             thread__set_comm(thread, event->comm.comm)) {
900                 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
901                 return -1;
902         }
903         total_comm++;
904
905         return 0;
906 }
907
908 static int
909 process_event(event_t *event, unsigned long offset, unsigned long head)
910 {
911         if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
912                 return process_overflow_event(event, offset, head);
913
914         switch (event->header.type) {
915         case PERF_EVENT_MMAP:
916                 return process_mmap_event(event, offset, head);
917
918         case PERF_EVENT_COMM:
919                 return process_comm_event(event, offset, head);
920
921         /*
922          * We don't process them right now but they are fine:
923          */
924         case PERF_EVENT_MUNMAP:
925         case PERF_EVENT_PERIOD:
926         case PERF_EVENT_THROTTLE:
927         case PERF_EVENT_UNTHROTTLE:
928                 return 0;
929
930         default:
931                 return -1;
932         }
933
934         return 0;
935 }
936
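/*
 * Map the input file in mmap_window-sized chunks and feed every event to
 * process_event(), sliding the window forward whenever an event would cross
 * its end.
 */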
937 static int __cmd_report(void)
938 {
939         int ret, rc = EXIT_FAILURE;
940         unsigned long offset = 0;
941         unsigned long head = 0;
942         struct stat stat;
943         event_t *event;
944         uint32_t size;
945         char *buf;
946
947         register_idle_thread();
948
949         input = open(input_name, O_RDONLY);
950         if (input < 0) {
951                 perror("failed to open file");
952                 exit(-1);
953         }
954
955         ret = fstat(input, &stat);
956         if (ret < 0) {
957                 perror("failed to stat file");
958                 exit(-1);
959         }
960
961         if (!stat.st_size) {
962                 fprintf(stderr, "zero-sized file, nothing to do!\n");
963                 exit(0);
964         }
965
966         if (load_kernel() < 0) {
967                 perror("failed to load kernel symbols");
968                 return EXIT_FAILURE;
969         }
970
971         if (!full_paths) {
972                 if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
973                         perror("failed to get the current directory");
974                         return EXIT_FAILURE;
975                 }
976                 cwdlen = strlen(cwd);
977         } else {
978                 cwd = NULL;
979                 cwdlen = 0;
980         }
981 remap:
982         buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
983                            MAP_SHARED, input, offset);
984         if (buf == MAP_FAILED) {
985                 perror("failed to mmap file");
986                 exit(-1);
987         }
988
989 more:
990         event = (event_t *)(buf + head);
991
992         size = event->header.size;
993         if (!size)
994                 size = 8;
995
996         if (head + event->header.size >= page_size * mmap_window) {
997                 unsigned long shift = page_size * (head / page_size);
998                 int ret;
999
1000                 ret = munmap(buf, page_size * mmap_window);
1001                 assert(ret == 0);
1002
1003                 offset += shift;
1004                 head -= shift;
1005                 goto remap;
1006         }
1007
1008         size = event->header.size;
1009
1010         if (!size || process_event(event, offset, head) < 0) {
1011
1012                 dprintf("%p [%p]: skipping unknown header type: %d\n",
1013                         (void *)(offset + head),
1014                         (void *)(long)(event->header.size),
1015                         event->header.type);
1016
1017                 total_unknown++;
1018
1019                 /*
1020                  * assume we lost track of the stream, check alignment, and
1021                  * advance by a single u64 in the hope of catching on again 'soon'.
1022                  */
1023
1024                 if (unlikely(head & 7))
1025                         head &= ~7ULL;
1026
1027                 size = 8;
1028         }
1029
1030         head += size;
1031
1032         if (offset + head < stat.st_size)
1033                 goto more;
1034
1035         rc = EXIT_SUCCESS;
1036         close(input);
1037
1038         dprintf("      IP events: %10ld\n", total);
1039         dprintf("    mmap events: %10ld\n", total_mmap);
1040         dprintf("    comm events: %10ld\n", total_comm);
1041         dprintf(" unknown events: %10ld\n", total_unknown);
1042
1043         if (dump_trace)
1044                 return 0;
1045
1046         if (verbose >= 2)
1047                 dsos__fprintf(stdout);
1048
1049         collapse__resort();
1050         output__resort();
1051         output__fprintf(stdout, total);
1052
1053         return rc;
1054 }
1055
1056 static const char * const report_usage[] = {
1057         "perf report [<options>] <command>",
1058         NULL
1059 };
1060
1061 static const struct option options[] = {
1062         OPT_STRING('i', "input", &input_name, "file",
1063                     "input file name"),
1064         OPT_BOOLEAN('v', "verbose", &verbose,
1065                     "be more verbose (show symbol address, etc)"),
1066         OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1067                     "dump raw trace in ASCII"),
1068         OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
1069         OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1070                    "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
1071         OPT_BOOLEAN('P', "full-paths", &full_paths,
1072                     "Don't shorten the pathnames taking into account the cwd"),
1073         OPT_END()
1074 };
1075
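/* Split the --sort string into tokens and register each sort dimension. */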
1076 static void setup_sorting(void)
1077 {
1078         char *tmp, *tok, *str = strdup(sort_order);
1079
1080         for (tok = strtok_r(str, ", ", &tmp);
1081                         tok; tok = strtok_r(NULL, ", ", &tmp)) {
1082                 if (sort_dimension__add(tok) < 0) {
1083                         error("Unknown --sort key: `%s'", tok);
1084                         usage_with_options(report_usage, options);
1085                 }
1086         }
1087
1088         free(str);
1089 }
1090
1091 int cmd_report(int argc, const char **argv, const char *prefix)
1092 {
1093         symbol__init();
1094
1095         page_size = getpagesize();
1096
1097         argc = parse_options(argc, argv, options, report_usage, 0);
1098
1099         setup_sorting();
1100
1101         /*
1102          * Any (unrecognized) arguments left?
1103          */
1104         if (argc)
1105                 usage_with_options(report_usage, options);
1106
1107         setup_pager();
1108
1109         return __cmd_report();
1110 }