/*
 * builtin-report.c
 *
 * Builtin report command: analyze the perf.data input file,
 * look up and read DSOs and symbol information, and display
 * a histogram of the results along various sort keys.
 */
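/*
 * Example usage (option names are taken from the options table at the
 * bottom of this file; the workload name is purely illustrative):
 *
 *   perf record ./my-workload       # writes perf.data
 *   perf report --sort comm,dso,symbol
 */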
#include "builtin.h"

#include "util/util.h"

#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL     1
#define SHOW_USER       2
#define SHOW_HV         4

static char             const *input_name = "perf.data";
static char             *vmlinux = NULL;

static char             default_sort_order[] = "comm,dso";
static char             *sort_order = default_sort_order;

static int              input;
static int              show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int              dump_trace = 0;
#define dprintf(x...)   do { if (dump_trace) printf(x); } while (0)

static int              verbose;
static int              full_paths;

static unsigned long    page_size;
static unsigned long    mmap_window = 32;

const char *perf_event_names[] = {
        [PERF_EVENT_MMAP]   = " PERF_EVENT_MMAP",
        [PERF_EVENT_MUNMAP] = " PERF_EVENT_MUNMAP",
        [PERF_EVENT_COMM]   = " PERF_EVENT_COMM",
};

struct ip_event {
        struct perf_event_header header;
        __u64 ip;
        __u32 pid, tid;
};

struct mmap_event {
        struct perf_event_header header;
        __u32 pid, tid;
        __u64 start;
        __u64 len;
        __u64 pgoff;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        __u32 pid, tid;
        char comm[16];
};

typedef union event_union {
        struct perf_event_header header;
        struct ip_event ip;
        struct mmap_event mmap;
        struct comm_event comm;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;

static void dsos__add(struct dso *dso)
{
        list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                if (strcmp(pos->name, name) == 0)
                        return pos;
        return NULL;
}

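/*
 * Find the DSO with the given name; on first use it is created, its
 * symbol table loaded, and it is added to the global list.
 */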
static struct dso *dsos__findnew(const char *name)
{
        struct dso *dso = dsos__find(name);
        int nr;

        if (dso)
                return dso;

        dso = dso__new(name, 0);
        if (!dso)
                goto out_delete_dso;

        nr = dso__load(dso, NULL, verbose);
        if (nr < 0) {
                if (verbose)
                        fprintf(stderr, "Failed to open: %s\n", name);
                goto out_delete_dso;
        }
        if (!nr && verbose) {
                fprintf(stderr,
                "No symbols found in: %s, maybe install a debug package?\n",
                                name);
        }

        dsos__add(dso);

        return dso;

out_delete_dso:
        dso__delete(dso);
        return NULL;
}

static void dsos__fprintf(FILE *fp)
{
        struct dso *pos;

        list_for_each_entry(pos, &dsos, node)
                dso__fprintf(pos, fp);
}

static int load_kernel(void)
{
        int err;

        kernel_dso = dso__new("[kernel]", 0);
        if (!kernel_dso)
                return -1;

        err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
        if (err) {
                dso__delete(kernel_dso);
                kernel_dso = NULL;
        } else
                dsos__add(kernel_dso);

        return err;
}

static char __cwd[PATH_MAX];
static char *cwd = __cwd;
static int cwdlen;

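/*
 * Return how many leading characters 'pathname' has in common with the
 * current working directory.
 */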
static int strcommon(const char *pathname)
{
        int n = 0;

        while (pathname[n] == cwd[n] && n < cwdlen)
                ++n;

        return n;
}

struct map {
        struct list_head node;
        uint64_t         start;
        uint64_t         end;
        uint64_t         pgoff;
        struct dso       *dso;
};

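/*
 * Build a map from an MMAP event; pathnames below the current working
 * directory are shortened to "./..." unless --full-paths was given
 * (in which case cwd is NULL).
 */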
static struct map *map__new(struct mmap_event *event)
{
        struct map *self = malloc(sizeof(*self));

        if (self != NULL) {
                const char *filename = event->filename;
                char newfilename[PATH_MAX];

                if (cwd) {
                        int n = strcommon(filename);

                        if (n == cwdlen) {
                                snprintf(newfilename, sizeof(newfilename),
                                         ".%s", filename + n);
                                filename = newfilename;
                        }
                }

                self->start = event->start;
                self->end   = event->start + event->len;
                self->pgoff = event->pgoff;

                self->dso = dsos__findnew(filename);
                if (self->dso == NULL)
                        goto out_delete;
        }
        return self;
out_delete:
        free(self);
        return NULL;
}

struct thread;

struct thread {
        struct rb_node   rb_node;
        struct list_head maps;
        pid_t            pid;
        char             *comm;
};

static struct thread *thread__new(pid_t pid)
{
        struct thread *self = malloc(sizeof(*self));

        if (self != NULL) {
                self->pid = pid;
                self->comm = malloc(32);
                if (self->comm)
                        snprintf(self->comm, 32, ":%d", self->pid);
                INIT_LIST_HEAD(&self->maps);
        }

        return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
        if (self->comm)
                free(self->comm);
        self->comm = strdup(comm);
        return self->comm ? 0 : -ENOMEM;
}

static struct rb_root threads;
static struct thread *last_match;

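/*
 * Look up the thread for 'pid' in the rbtree, creating and inserting
 * it on first sight.
 */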
static struct thread *threads__findnew(pid_t pid)
{
        struct rb_node **p = &threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (last_match && last_match->pid == pid)
                return last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &threads);
                last_match = th;
        }

        return th;
}

static void thread__insert_map(struct thread *self, struct map *map)
{
        list_add_tail(&map->node, &self->maps);
}

static struct map *thread__find_map(struct thread *self, uint64_t ip)
{
        struct map *pos;

        if (self == NULL)
                return NULL;

        list_for_each_entry(pos, &self->maps, node)
                if (ip >= pos->start && ip <= pos->end)
                        return pos;

        return NULL;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
        struct rb_node   rb_node;

        struct thread    *thread;
        struct map       *map;
        struct dso       *dso;
        struct symbol    *sym;
        uint64_t         ip;
        char             level;

        uint32_t         count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
        struct list_head list;

        char *header;

        int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
        int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
        size_t  (*print)(FILE *fp, struct hist_entry *);
};

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, " %16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
        .header = "         Command: Pid ",
        .cmp    = sort__thread_cmp,
        .print  = sort__thread_print,
};

/* --sort comm */

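/*
 * Note: initial insertion compares by pid, which is cheap; entries that
 * belong to the same comm are merged afterwards by sort__comm_collapse()
 * in the collapse pass.
 */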
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
        return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
        char *comm_l = left->thread->comm;
        char *comm_r = right->thread->comm;

        if (!comm_l || !comm_r) {
                if (!comm_l && !comm_r)
                        return 0;
                else if (!comm_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
        return fprintf(fp, "  %16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
        .header         = "          Command",
        .cmp            = sort__comm_cmp,
        .collapse       = sort__comm_collapse,
        .print          = sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct dso *dso_l = left->dso;
        struct dso *dso_r = right->dso;

        if (!dso_l || !dso_r) {
                if (!dso_l && !dso_r)
                        return 0;
                else if (!dso_l)
                        return -1;
                else
                        return 1;
        }

        return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
        if (self->dso)
                return fprintf(fp, "  %-25s", self->dso->name);

        return fprintf(fp, "  %016llx         ", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
        .header = " Shared Object            ",
        .cmp    = sort__dso_cmp,
        .print  = sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
        uint64_t ip_l, ip_r;

        if (left->sym == right->sym)
                return 0;

        ip_l = left->sym ? left->sym->start : left->ip;
        ip_r = right->sym ? right->sym->start : right->ip;

        return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
        size_t ret = 0;

        if (verbose)
                ret += fprintf(fp, "  %#018llx", (__u64)self->ip);

        if (self->sym)
                ret += fprintf(fp, "  %s", self->sym->name);
        else
                ret += fprintf(fp, "  %#016llx", (__u64)self->ip);

        return ret;
}

static struct sort_entry sort_sym = {
        .header = " Symbol",
        .cmp    = sort__sym_cmp,
        .print  = sort__sym_print,
};

static int sort__need_collapse = 0;

struct sort_dimension {
        char *name;
        struct sort_entry *entry;
        int taken;
};

static struct sort_dimension sort_dimensions[] = {
        { .name = "pid",        .entry = &sort_thread,  },
        { .name = "comm",       .entry = &sort_comm,    },
        { .name = "dso",        .entry = &sort_dso,     },
        { .name = "symbol",     .entry = &sort_sym,     },
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
                struct sort_dimension *sd = &sort_dimensions[i];

                if (sd->taken)
                        continue;

                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;

                if (sd->entry->collapse)
                        sort__need_collapse = 1;

                list_add_tail(&sd->entry->list, &hist_entry__sort_list);
                sd->taken = 1;

                return 0;
        }

        return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

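/*
 * Like hist_entry__cmp(), but prefer each key's ->collapse comparator
 * when one is provided.
 */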
static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->collapse ?: se->cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, uint64_t total_samples)
{
        struct sort_entry *se;
        size_t ret;

        if (total_samples) {
                ret = fprintf(fp, "   %6.2f%%",
                                (self->count * 100.0) / total_samples);
        } else
                ret = fprintf(fp, "%12d ", self->count);

        list_for_each_entry(se, &hist_entry__sort_list, list)
                ret += se->print(fp, self);

        ret += fprintf(fp, "\n");

        return ret;
}

/*
 * collect histogram counts
 */

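/*
 * Account one sample: entries that compare equal under the active sort
 * keys share a single rbtree node and simply have their count bumped.
 */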
static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
                struct symbol *sym, uint64_t ip, char level)
{
        struct rb_node **p = &hist.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct hist_entry entry = {
                .thread = thread,
                .map    = map,
                .dso    = dso,
                .sym    = sym,
                .ip     = ip,
                .level  = level,
                .count  = 1,
        };
        int cmp;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__cmp(&entry, he);

                if (!cmp) {
                        he->count++;
                        return 0;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = malloc(sizeof(*he));
        if (!he)
                return -ENOMEM;
        *he = entry;
        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &hist);

        return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram: merge entries that are equal under the
 * ->collapse comparators (e.g. all threads that share one comm)
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &collapse_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->count += he->count;
                        hist_entry__free(he);
                        return;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        next = rb_first(&hist);
        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, &hist);
                collapse__insert_entry(n);
        }
}

/*
 * reverse the map: re-sort the entries on count (descending) for output.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
        struct rb_node **p = &output_hists.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->count > iter->count)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
        struct rb_node *next;
        struct hist_entry *n;
        struct rb_root *tree = &hist;

        if (sort__need_collapse)
                tree = &collapse_hists;

        next = rb_first(tree);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                rb_erase(&n->rb_node, tree);
                output__insert_entry(n);
        }
}

static size_t output__fprintf(FILE *fp, uint64_t total_samples)
{
        struct hist_entry *pos;
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;

        fprintf(fp, "#\n");
        fprintf(fp, "# (%Ld profiler events)\n", (__u64)total_samples);
        fprintf(fp, "#\n");

        fprintf(fp, "# Overhead");
        list_for_each_entry(se, &hist_entry__sort_list, list)
                fprintf(fp, " %s", se->header);
        fprintf(fp, "\n");

        fprintf(fp, "# ........");
        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int i;

                fprintf(fp, "  ");
                for (i = 0; i < strlen(se->header)-1; i++)
                        fprintf(fp, ".");
        }
        fprintf(fp, "\n");

        fprintf(fp, "#\n");

        for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node);
                ret += hist_entry__fprintf(fp, pos, total_samples);
        }

        if (!strcmp(sort_order, default_sort_order)) {
                fprintf(fp, "#\n");
                fprintf(fp, "# ( For more details, try: perf report --sort comm,dso,symbol )\n");
                fprintf(fp, "#\n");
        }

        return ret;
}

static void register_idle_thread(void)
{
        struct thread *thread = threads__findnew(0);

        if (thread == NULL ||
                        thread__set_comm(thread, "[idle]")) {
                fprintf(stderr, "problem inserting idle task.\n");
                exit(-1);
        }
}

static unsigned long total = 0, total_mmap = 0, total_comm = 0, total_unknown = 0;

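/*
 * Handle one sample (counter overflow) event: resolve its IP to a
 * thread, map, DSO and symbol, then account it in the histogram.
 */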
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
{
        char level;
        int show = 0;
        struct dso *dso = NULL;
        struct thread *thread = threads__findnew(event->ip.pid);
        uint64_t ip = event->ip.ip;
        struct map *map = NULL;

        dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->header.misc,
                event->ip.pid,
                (void *)(long)ip);

        if (thread == NULL) {
                fprintf(stderr, "problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
        }

        dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

        if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
                show = SHOW_KERNEL;
                level = 'k';

                dso = kernel_dso;

                dprintf(" ...... dso: %s\n", dso->name);

        } else if (event->header.misc & PERF_EVENT_MISC_USER) {

                show = SHOW_USER;
                level = '.';

                map = thread__find_map(thread, ip);
                if (map != NULL) {
                        dso = map->dso;
                        ip -= map->start + map->pgoff;
                } else {
                        /*
                         * If this is outside of all known maps,
                         * and is a negative address, try to look it
                         * up in the kernel dso, as it might be a
                         * vsyscall (which executes in user-mode):
                         */
                        if ((long long)ip < 0)
                                dso = kernel_dso;
                }
                dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

        } else {
                show = SHOW_HV;
                level = 'H';
                dprintf(" ...... dso: [hypervisor]\n");
        }

        if (show & show_mask) {
                struct symbol *sym = dso__find_symbol(dso, ip);

                if (hist_entry__add(thread, map, dso, sym, ip, level)) {
                        fprintf(stderr,
                "problem incrementing symbol count, skipping event\n");
                        return -1;
                }
        }
        total++;

        return 0;
}

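/*
 * Record a new mapping for the thread so that later samples can be
 * resolved to the right DSO and symbol.
 */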
static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread = threads__findnew(event->mmap.pid);
        struct map *map = map__new(&event->mmap);

        dprintf("%p [%p]: PERF_EVENT_MMAP: [%p(%p) @ %p]: %s\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                (void *)(long)event->mmap.start,
                (void *)(long)event->mmap.len,
                (void *)(long)event->mmap.pgoff,
                event->mmap.filename);

        if (thread == NULL || map == NULL) {
                dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
                return 0;
        }

        thread__insert_map(thread, map);
        total_mmap++;

        return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
        struct thread *thread = threads__findnew(event->comm.pid);

        dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
                (void *)(offset + head),
                (void *)(long)(event->header.size),
                event->comm.comm, event->comm.pid);

        if (thread == NULL ||
            thread__set_comm(thread, event->comm.comm)) {
                dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
                return -1;
        }
        total_comm++;

        return 0;
}

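/*
 * Dispatch one event from the data file to its handler: samples are
 * flagged via PERF_EVENT_MISC_OVERFLOW, everything else is switched
 * on the header type.
 */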
static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
        if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
                return process_overflow_event(event, offset, head);

        switch (event->header.type) {
        case PERF_EVENT_MMAP:
                return process_mmap_event(event, offset, head);

        case PERF_EVENT_COMM:
                return process_comm_event(event, offset, head);

        /*
         * We don't process these right now, but they are fine:
         */
        case PERF_EVENT_MUNMAP:
        case PERF_EVENT_PERIOD:
        case PERF_EVENT_THROTTLE:
        case PERF_EVENT_UNTHROTTLE:
                return 0;

        default:
                return -1;
        }

        return 0;
}

static int __cmd_report(void)
{
        int ret, rc = EXIT_FAILURE;
        unsigned long offset = 0;
        unsigned long head = 0;
        struct stat stat;
        event_t *event;
        uint32_t size;
        char *buf;

        register_idle_thread();

        input = open(input_name, O_RDONLY);
        if (input < 0) {
                perror("failed to open file");
                exit(-1);
        }

        ret = fstat(input, &stat);
        if (ret < 0) {
                perror("failed to stat file");
                exit(-1);
        }

        if (!stat.st_size) {
                fprintf(stderr, "zero-sized file, nothing to do!\n");
                exit(0);
        }

        if (load_kernel() < 0) {
                perror("failed to load kernel symbols");
                return EXIT_FAILURE;
        }

        if (!full_paths) {
                if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
                        perror("failed to get the current directory");
                        return EXIT_FAILURE;
                }
                cwdlen = strlen(cwd);
        } else {
                cwd = NULL;
                cwdlen = 0;
        }
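        /*
         * The data file is read through a sliding window of mmap_window
         * pages; whenever an event would cross the end of the current
         * window, the window is advanced (page-aligned) and the event
         * is re-read from the new mapping.
         */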
remap:
        buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
                           MAP_SHARED, input, offset);
        if (buf == MAP_FAILED) {
                perror("failed to mmap file");
                exit(-1);
        }

more:
        event = (event_t *)(buf + head);

        size = event->header.size;
        if (!size)
                size = 8;

        if (head + event->header.size >= page_size * mmap_window) {
                unsigned long shift = page_size * (head / page_size);
                int ret;

                ret = munmap(buf, page_size * mmap_window);
                assert(ret == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;

        if (!size || process_event(event, offset, head) < 0) {

                dprintf("%p [%p]: skipping unknown header type: %d\n",
                        (void *)(offset + head),
                        (void *)(long)(event->header.size),
                        event->header.type);

                total_unknown++;

                /*
                 * Assume we lost track of the stream: check alignment, and
                 * advance by a single u64 in the hope of catching on again
                 * 'soon'.
                 */

                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (offset + head < stat.st_size)
                goto more;

        rc = EXIT_SUCCESS;
        close(input);

        dprintf("      IP events: %10ld\n", total);
        dprintf("    mmap events: %10ld\n", total_mmap);
        dprintf("    comm events: %10ld\n", total_comm);
        dprintf(" unknown events: %10ld\n", total_unknown);

        if (dump_trace)
                return 0;

        if (verbose >= 2)
                dsos__fprintf(stdout);

        collapse__resort();
        output__resort();
        output__fprintf(stdout, total);

        return rc;
}

static const char * const report_usage[] = {
        "perf report [<options>] <command>",
        NULL
};

static const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                    "input file name"),
        OPT_BOOLEAN('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
        OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
                   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
        OPT_BOOLEAN('P', "full-paths", &full_paths,
                    "Don't shorten the pathnames taking into account the cwd"),
        OPT_END()
};

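/*
 * Split the --sort string on ',' (and spaces) and activate the matching
 * sort dimensions, in the order given.
 */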
static void setup_sorting(void)
{
        char *tmp, *tok, *str = strdup(sort_order);

        for (tok = strtok_r(str, ", ", &tmp);
                        tok; tok = strtok_r(NULL, ", ", &tmp)) {
                if (sort_dimension__add(tok) < 0) {
                        error("Unknown --sort key: `%s'", tok);
                        usage_with_options(report_usage, options);
                }
        }

        free(str);
}

int cmd_report(int argc, const char **argv, const char *prefix)
{
        symbol__init();

        page_size = getpagesize();

        parse_options(argc, argv, options, report_usage, 0);

        setup_sorting();

        setup_pager();

        return __cmd_report();
}