tools/perf/util/session.c
1 #include <errno.h>
2 #include <inttypes.h>
3 #include <linux/kernel.h>
4 #include <traceevent/event-parse.h>
5 #include <api/fs/fs.h>
6
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <sys/types.h>
10 #include <sys/mman.h>
11
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "memswap.h"
15 #include "session.h"
16 #include "tool.h"
17 #include "sort.h"
18 #include "util.h"
19 #include "cpumap.h"
20 #include "perf_regs.h"
21 #include "asm/bug.h"
22 #include "auxtrace.h"
23 #include "thread.h"
24 #include "thread-stack.h"
25 #include "stat.h"
26
27 static int perf_session__deliver_event(struct perf_session *session,
28                                        union perf_event *event,
29                                        struct perf_sample *sample,
30                                        struct perf_tool *tool,
31                                        u64 file_offset);
32
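/*
 * Read and validate the perf.data header.  Pipe data and files that only
 * carry HEADER_STAT skip the evlist consistency checks below.
 */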
33 static int perf_session__open(struct perf_session *session)
34 {
35         struct perf_data_file *file = session->file;
36
37         if (perf_session__read_header(session) < 0) {
38                 pr_err("incompatible file format (rerun with -v to learn more)\n");
39                 return -1;
40         }
41
42         if (perf_data_file__is_pipe(file))
43                 return 0;
44
45         if (perf_header__has_feat(&session->header, HEADER_STAT))
46                 return 0;
47
48         if (!perf_evlist__valid_sample_type(session->evlist)) {
49                 pr_err("non matching sample_type\n");
50                 return -1;
51         }
52
53         if (!perf_evlist__valid_sample_id_all(session->evlist)) {
54                 pr_err("non matching sample_id_all\n");
55                 return -1;
56         }
57
58         if (!perf_evlist__valid_read_format(session->evlist)) {
59                 pr_err("non matching read_format\n");
60                 return -1;
61         }
62
63         return 0;
64 }
65
66 void perf_session__set_id_hdr_size(struct perf_session *session)
67 {
68         u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
69
70         machines__set_id_hdr_size(&session->machines, id_hdr_size);
71 }
72
73 int perf_session__create_kernel_maps(struct perf_session *session)
74 {
75         int ret = machine__create_kernel_maps(&session->machines.host);
76
77         if (ret >= 0)
78                 ret = machines__create_guest_kernel_maps(&session->machines);
79         return ret;
80 }
81
82 static void perf_session__destroy_kernel_maps(struct perf_session *session)
83 {
84         machines__destroy_kernel_maps(&session->machines);
85 }
86
87 static bool perf_session__has_comm_exec(struct perf_session *session)
88 {
89         struct perf_evsel *evsel;
90
91         evlist__for_each_entry(session->evlist, evsel) {
92                 if (evsel->attr.comm_exec)
93                         return true;
94         }
95
96         return false;
97 }
98
99 static void perf_session__set_comm_exec(struct perf_session *session)
100 {
101         bool comm_exec = perf_session__has_comm_exec(session);
102
103         machines__set_comm_exec(&session->machines, comm_exec);
104 }
105
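/*
 * Callback used by the ordered_events queue: parse the queued event into a
 * perf_sample and hand it to perf_session__deliver_event().
 */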
106 static int ordered_events__deliver_event(struct ordered_events *oe,
107                                          struct ordered_event *event)
108 {
109         struct perf_sample sample;
110         struct perf_session *session = container_of(oe, struct perf_session,
111                                                     ordered_events);
112         int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
113
114         if (ret) {
115                 pr_err("Can't parse sample, err = %d\n", ret);
116                 return ret;
117         }
118
119         return perf_session__deliver_event(session, event->event, &sample,
120                                            session->tool, event->file_offset);
121 }
122
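/*
 * Allocate a session, optionally attach a data file and, in read mode, parse
 * its header.  Kernel maps are created up front only when writing (or when
 * there is no file at all); in read mode they come from the kernel MMAP event.
 */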
123 struct perf_session *perf_session__new(struct perf_data_file *file,
124                                        bool repipe, struct perf_tool *tool)
125 {
126         struct perf_session *session = zalloc(sizeof(*session));
127
128         if (!session)
129                 goto out;
130
131         session->repipe = repipe;
132         session->tool   = tool;
133         INIT_LIST_HEAD(&session->auxtrace_index);
134         machines__init(&session->machines);
135         ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
136
137         if (file) {
138                 if (perf_data_file__open(file))
139                         goto out_delete;
140
141                 session->file = file;
142
143                 if (perf_data_file__is_read(file)) {
144                         if (perf_session__open(session) < 0)
145                                 goto out_close;
146
147                         /*
148                          * Set session attributes that are present in perf.data
149                          * but not in pipe mode.
150                          */
151                         if (!file->is_pipe) {
152                                 perf_session__set_id_hdr_size(session);
153                                 perf_session__set_comm_exec(session);
154                         }
155                 }
156         } else  {
157                 session->machines.host.env = &perf_env;
158         }
159
160         if (!file || perf_data_file__is_write(file)) {
161                 /*
162                  * In O_RDONLY mode this will be performed when reading the
163                  * kernel MMAP event, in perf_event__process_mmap().
164                  */
165                 if (perf_session__create_kernel_maps(session) < 0)
166                         pr_warning("Cannot read kernel map\n");
167         }
168
169         /*
170          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
171          * processed, so perf_evlist__sample_id_all is not meaningful here.
172          */
173         if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
174             tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
175                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
176                 tool->ordered_events = false;
177         }
178
179         return session;
180
181  out_close:
182         perf_data_file__close(file);
183  out_delete:
184         perf_session__delete(session);
185  out:
186         return NULL;
187 }
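/*
 * Minimal read-mode usage sketch (not part of this file): my_sample() is a
 * hypothetical caller-provided handler and perf_session__process_events() is
 * declared in session.h.  Error handling is omitted.
 *
 *	struct perf_tool tool = {
 *		.sample		= my_sample,
 *		.ordered_events	= true,
 *	};
 *	struct perf_data_file file = {
 *		.path	= "perf.data",
 *		.mode	= PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session) {
 *		perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */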
188
189 static void perf_session__delete_threads(struct perf_session *session)
190 {
191         machine__delete_threads(&session->machines.host);
192 }
193
194 void perf_session__delete(struct perf_session *session)
195 {
196         if (session == NULL)
197                 return;
198         auxtrace__free(session);
199         auxtrace_index__free(&session->auxtrace_index);
200         perf_session__destroy_kernel_maps(session);
201         perf_session__delete_threads(session);
202         perf_env__exit(&session->header.env);
203         machines__exit(&session->machines);
204         if (session->file)
205                 perf_data_file__close(session->file);
206         free(session);
207 }
208
209 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
210                                                  __maybe_unused,
211                                                  union perf_event *event
212                                                  __maybe_unused,
213                                                  struct perf_session *session
214                                                 __maybe_unused)
215 {
216         dump_printf(": unhandled!\n");
217         return 0;
218 }
219
220 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
221                                          union perf_event *event __maybe_unused,
222                                          struct perf_evlist **pevlist
223                                          __maybe_unused)
224 {
225         dump_printf(": unhandled!\n");
226         return 0;
227 }
228
229 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
230                                                  union perf_event *event __maybe_unused,
231                                                  struct perf_evlist **pevlist
232                                                  __maybe_unused)
233 {
234         if (dump_trace)
235                 perf_event__fprintf_event_update(event, stdout);
236
237         dump_printf(": unhandled!\n");
238         return 0;
239 }
240
241 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
242                                      union perf_event *event __maybe_unused,
243                                      struct perf_sample *sample __maybe_unused,
244                                      struct perf_evsel *evsel __maybe_unused,
245                                      struct machine *machine __maybe_unused)
246 {
247         dump_printf(": unhandled!\n");
248         return 0;
249 }
250
251 static int process_event_stub(struct perf_tool *tool __maybe_unused,
252                               union perf_event *event __maybe_unused,
253                               struct perf_sample *sample __maybe_unused,
254                               struct machine *machine __maybe_unused)
255 {
256         dump_printf(": unhandled!\n");
257         return 0;
258 }
259
260 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
261                                        union perf_event *event __maybe_unused,
262                                        struct ordered_events *oe __maybe_unused)
263 {
264         dump_printf(": unhandled!\n");
265         return 0;
266 }
267
268 static int process_finished_round(struct perf_tool *tool,
269                                   union perf_event *event,
270                                   struct ordered_events *oe);
271
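/* Read and discard n bytes from fd, used to skip AUXTRACE data on a pipe. */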
272 static int skipn(int fd, off_t n)
273 {
274         char buf[4096];
275         ssize_t ret;
276
277         while (n > 0) {
278                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
279                 if (ret <= 0)
280                         return ret;
281                 n -= ret;
282         }
283
284         return 0;
285 }
286
287 static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
288                                        union perf_event *event,
289                                        struct perf_session *session
290                                        __maybe_unused)
291 {
292         dump_printf(": unhandled!\n");
293         if (perf_data_file__is_pipe(session->file))
294                 skipn(perf_data_file__fd(session->file), event->auxtrace.size);
295         return event->auxtrace.size;
296 }
297
298 static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
299                                   union perf_event *event __maybe_unused,
300                                   struct perf_session *session __maybe_unused)
301 {
302         dump_printf(": unhandled!\n");
303         return 0;
304 }
305
306
307 static
308 int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
309                                   union perf_event *event __maybe_unused,
310                                   struct perf_session *session __maybe_unused)
311 {
312         if (dump_trace)
313                 perf_event__fprintf_thread_map(event, stdout);
314
315         dump_printf(": unhandled!\n");
316         return 0;
317 }
318
319 static
320 int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
321                                union perf_event *event __maybe_unused,
322                                struct perf_session *session __maybe_unused)
323 {
324         if (dump_trace)
325                 perf_event__fprintf_cpu_map(event, stdout);
326
327         dump_printf(": unhandled!\n");
328         return 0;
329 }
330
331 static
332 int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
333                                    union perf_event *event __maybe_unused,
334                                    struct perf_session *session __maybe_unused)
335 {
336         if (dump_trace)
337                 perf_event__fprintf_stat_config(event, stdout);
338
339         dump_printf(": unhandled!\n");
340         return 0;
341 }
342
343 static int process_stat_stub(struct perf_tool *tool __maybe_unused,
344                              union perf_event *event __maybe_unused,
345                              struct perf_session *perf_session
346                              __maybe_unused)
347 {
348         if (dump_trace)
349                 perf_event__fprintf_stat(event, stdout);
350
351         dump_printf(": unhandled!\n");
352         return 0;
353 }
354
355 static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
356                                    union perf_event *event __maybe_unused,
357                                    struct perf_session *perf_session
358                                    __maybe_unused)
359 {
360         if (dump_trace)
361                 perf_event__fprintf_stat_round(event, stdout);
362
363         dump_printf(": unhandled!\n");
364         return 0;
365 }
366
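/*
 * Point every callback the tool left NULL at either a real default handler
 * (lost, aux, itrace_start, ...) or a stub that just reports "unhandled".
 */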
367 void perf_tool__fill_defaults(struct perf_tool *tool)
368 {
369         if (tool->sample == NULL)
370                 tool->sample = process_event_sample_stub;
371         if (tool->mmap == NULL)
372                 tool->mmap = process_event_stub;
373         if (tool->mmap2 == NULL)
374                 tool->mmap2 = process_event_stub;
375         if (tool->comm == NULL)
376                 tool->comm = process_event_stub;
377         if (tool->fork == NULL)
378                 tool->fork = process_event_stub;
379         if (tool->exit == NULL)
380                 tool->exit = process_event_stub;
381         if (tool->lost == NULL)
382                 tool->lost = perf_event__process_lost;
383         if (tool->lost_samples == NULL)
384                 tool->lost_samples = perf_event__process_lost_samples;
385         if (tool->aux == NULL)
386                 tool->aux = perf_event__process_aux;
387         if (tool->itrace_start == NULL)
388                 tool->itrace_start = perf_event__process_itrace_start;
389         if (tool->context_switch == NULL)
390                 tool->context_switch = perf_event__process_switch;
391         if (tool->read == NULL)
392                 tool->read = process_event_sample_stub;
393         if (tool->throttle == NULL)
394                 tool->throttle = process_event_stub;
395         if (tool->unthrottle == NULL)
396                 tool->unthrottle = process_event_stub;
397         if (tool->attr == NULL)
398                 tool->attr = process_event_synth_attr_stub;
399         if (tool->event_update == NULL)
400                 tool->event_update = process_event_synth_event_update_stub;
401         if (tool->tracing_data == NULL)
402                 tool->tracing_data = process_event_synth_tracing_data_stub;
403         if (tool->build_id == NULL)
404                 tool->build_id = process_event_op2_stub;
405         if (tool->finished_round == NULL) {
406                 if (tool->ordered_events)
407                         tool->finished_round = process_finished_round;
408                 else
409                         tool->finished_round = process_finished_round_stub;
410         }
411         if (tool->id_index == NULL)
412                 tool->id_index = process_event_op2_stub;
413         if (tool->auxtrace_info == NULL)
414                 tool->auxtrace_info = process_event_op2_stub;
415         if (tool->auxtrace == NULL)
416                 tool->auxtrace = process_event_auxtrace_stub;
417         if (tool->auxtrace_error == NULL)
418                 tool->auxtrace_error = process_event_op2_stub;
419         if (tool->thread_map == NULL)
420                 tool->thread_map = process_event_thread_map_stub;
421         if (tool->cpu_map == NULL)
422                 tool->cpu_map = process_event_cpu_map_stub;
423         if (tool->stat_config == NULL)
424                 tool->stat_config = process_event_stat_config_stub;
425         if (tool->stat == NULL)
426                 tool->stat = process_stat_stub;
427         if (tool->stat_round == NULL)
428                 tool->stat_round = process_stat_round_stub;
429         if (tool->time_conv == NULL)
430                 tool->time_conv = process_event_op2_stub;
431 }
432
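/*
 * Byte-swap the trailing sample_id_all block that follows the type-specific
 * payload of a non-sample event; it is laid out as an array of u64s.
 */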
433 static void swap_sample_id_all(union perf_event *event, void *data)
434 {
435         void *end = (void *) event + event->header.size;
436         int size = end - data;
437
438         BUG_ON(size % sizeof(u64));
439         mem_bswap_64(data, size);
440 }
441
442 static void perf_event__all64_swap(union perf_event *event,
443                                    bool sample_id_all __maybe_unused)
444 {
445         struct perf_event_header *hdr = &event->header;
446         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
447 }
448
449 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
450 {
451         event->comm.pid = bswap_32(event->comm.pid);
452         event->comm.tid = bswap_32(event->comm.tid);
453
454         if (sample_id_all) {
455                 void *data = &event->comm.comm;
456
457                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
458                 swap_sample_id_all(event, data);
459         }
460 }
461
462 static void perf_event__mmap_swap(union perf_event *event,
463                                   bool sample_id_all)
464 {
465         event->mmap.pid   = bswap_32(event->mmap.pid);
466         event->mmap.tid   = bswap_32(event->mmap.tid);
467         event->mmap.start = bswap_64(event->mmap.start);
468         event->mmap.len   = bswap_64(event->mmap.len);
469         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
470
471         if (sample_id_all) {
472                 void *data = &event->mmap.filename;
473
474                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
475                 swap_sample_id_all(event, data);
476         }
477 }
478
479 static void perf_event__mmap2_swap(union perf_event *event,
480                                   bool sample_id_all)
481 {
482         event->mmap2.pid   = bswap_32(event->mmap2.pid);
483         event->mmap2.tid   = bswap_32(event->mmap2.tid);
484         event->mmap2.start = bswap_64(event->mmap2.start);
485         event->mmap2.len   = bswap_64(event->mmap2.len);
486         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
487         event->mmap2.maj   = bswap_32(event->mmap2.maj);
488         event->mmap2.min   = bswap_32(event->mmap2.min);
489         event->mmap2.ino   = bswap_64(event->mmap2.ino);
490
491         if (sample_id_all) {
492                 void *data = &event->mmap2.filename;
493
494                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
495                 swap_sample_id_all(event, data);
496         }
497 }
498 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
499 {
500         event->fork.pid  = bswap_32(event->fork.pid);
501         event->fork.tid  = bswap_32(event->fork.tid);
502         event->fork.ppid = bswap_32(event->fork.ppid);
503         event->fork.ptid = bswap_32(event->fork.ptid);
504         event->fork.time = bswap_64(event->fork.time);
505
506         if (sample_id_all)
507                 swap_sample_id_all(event, &event->fork + 1);
508 }
509
510 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
511 {
512         event->read.pid          = bswap_32(event->read.pid);
513         event->read.tid          = bswap_32(event->read.tid);
514         event->read.value        = bswap_64(event->read.value);
515         event->read.time_enabled = bswap_64(event->read.time_enabled);
516         event->read.time_running = bswap_64(event->read.time_running);
517         event->read.id           = bswap_64(event->read.id);
518
519         if (sample_id_all)
520                 swap_sample_id_all(event, &event->read + 1);
521 }
522
523 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
524 {
525         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
526         event->aux.aux_size   = bswap_64(event->aux.aux_size);
527         event->aux.flags      = bswap_64(event->aux.flags);
528
529         if (sample_id_all)
530                 swap_sample_id_all(event, &event->aux + 1);
531 }
532
533 static void perf_event__itrace_start_swap(union perf_event *event,
534                                           bool sample_id_all)
535 {
536         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
537         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
538
539         if (sample_id_all)
540                 swap_sample_id_all(event, &event->itrace_start + 1);
541 }
542
543 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
544 {
545         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
546                 event->context_switch.next_prev_pid =
547                                 bswap_32(event->context_switch.next_prev_pid);
548                 event->context_switch.next_prev_tid =
549                                 bswap_32(event->context_switch.next_prev_tid);
550         }
551
552         if (sample_id_all)
553                 swap_sample_id_all(event, &event->context_switch + 1);
554 }
555
556 static void perf_event__throttle_swap(union perf_event *event,
557                                       bool sample_id_all)
558 {
559         event->throttle.time      = bswap_64(event->throttle.time);
560         event->throttle.id        = bswap_64(event->throttle.id);
561         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
562
563         if (sample_id_all)
564                 swap_sample_id_all(event, &event->throttle + 1);
565 }
566
567 static u8 revbyte(u8 b)
568 {
569         int rev = (b >> 4) | ((b & 0xf) << 4);
570         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
571         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
572         return (u8) rev;
573 }
574
575 /*
576  * XXX this is a hack in an attempt to carry the flags bitfield
577  * through the endianness conversion. The ABI says:
578  *
579  * Bit-fields are allocated from right to left (least to most significant)
580  * on little-endian implementations and from left to right (most to least
581  * significant) on big-endian implementations.
582  *
583  * The above seems to be byte specific, so we need to reverse each
584  * byte of the bitfield. 'Internet' also says this might be implementation
585  * specific and we probably need a proper fix and should carry the
586  * perf_event_attr bitfield flags in a separate data file FEAT_ section.
587  * Though this seems to work for now.
588  */
589 static void swap_bitfield(u8 *p, unsigned len)
590 {
591         unsigned i;
592
593         for (i = 0; i < len; i++) {
594                 *p = revbyte(*p);
595                 p++;
596         }
597 }
598
599 /* exported for swapping attributes in file header */
600 void perf_event__attr_swap(struct perf_event_attr *attr)
601 {
602         attr->type              = bswap_32(attr->type);
603         attr->size              = bswap_32(attr->size);
604
605 #define bswap_safe(f, n)                                        \
606         (attr->size > (offsetof(struct perf_event_attr, f) +    \
607                        sizeof(attr->f) * (n)))
608 #define bswap_field(f, sz)                      \
609 do {                                            \
610         if (bswap_safe(f, 0))                   \
611                 attr->f = bswap_##sz(attr->f);  \
612 } while(0)
613 #define bswap_field_16(f) bswap_field(f, 16)
614 #define bswap_field_32(f) bswap_field(f, 32)
615 #define bswap_field_64(f) bswap_field(f, 64)
616
617         bswap_field_64(config);
618         bswap_field_64(sample_period);
619         bswap_field_64(sample_type);
620         bswap_field_64(read_format);
621         bswap_field_32(wakeup_events);
622         bswap_field_32(bp_type);
623         bswap_field_64(bp_addr);
624         bswap_field_64(bp_len);
625         bswap_field_64(branch_sample_type);
626         bswap_field_64(sample_regs_user);
627         bswap_field_32(sample_stack_user);
628         bswap_field_32(aux_watermark);
629         bswap_field_16(sample_max_stack);
630
631         /*
632          * The bitfields follow read_format. Check against read_format
633          * because offsetof() cannot be used on a bitfield.
634          */
635         if (bswap_safe(read_format, 1))
636                 swap_bitfield((u8 *) (&attr->read_format + 1),
637                               sizeof(u64));
638 #undef bswap_field_64
639 #undef bswap_field_32
640 #undef bswap_field
641 #undef bswap_safe
642 }
643
644 static void perf_event__hdr_attr_swap(union perf_event *event,
645                                       bool sample_id_all __maybe_unused)
646 {
647         size_t size;
648
649         perf_event__attr_swap(&event->attr.attr);
650
651         size = event->header.size;
652         size -= (void *)&event->attr.id - (void *)event;
653         mem_bswap_64(event->attr.id, size);
654 }
655
656 static void perf_event__event_update_swap(union perf_event *event,
657                                           bool sample_id_all __maybe_unused)
658 {
659         event->event_update.type = bswap_64(event->event_update.type);
660         event->event_update.id   = bswap_64(event->event_update.id);
661 }
662
663 static void perf_event__event_type_swap(union perf_event *event,
664                                         bool sample_id_all __maybe_unused)
665 {
666         event->event_type.event_type.event_id =
667                 bswap_64(event->event_type.event_type.event_id);
668 }
669
670 static void perf_event__tracing_data_swap(union perf_event *event,
671                                           bool sample_id_all __maybe_unused)
672 {
673         event->tracing_data.size = bswap_32(event->tracing_data.size);
674 }
675
676 static void perf_event__auxtrace_info_swap(union perf_event *event,
677                                            bool sample_id_all __maybe_unused)
678 {
679         size_t size;
680
681         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
682
683         size = event->header.size;
684         size -= (void *)&event->auxtrace_info.priv - (void *)event;
685         mem_bswap_64(event->auxtrace_info.priv, size);
686 }
687
688 static void perf_event__auxtrace_swap(union perf_event *event,
689                                       bool sample_id_all __maybe_unused)
690 {
691         event->auxtrace.size      = bswap_64(event->auxtrace.size);
692         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
693         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
694         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
695         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
696         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
697 }
698
699 static void perf_event__auxtrace_error_swap(union perf_event *event,
700                                             bool sample_id_all __maybe_unused)
701 {
702         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
703         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
704         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
705         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
706         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
707         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
708 }
709
710 static void perf_event__thread_map_swap(union perf_event *event,
711                                         bool sample_id_all __maybe_unused)
712 {
713         unsigned i;
714
715         event->thread_map.nr = bswap_64(event->thread_map.nr);
716
717         for (i = 0; i < event->thread_map.nr; i++)
718                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
719 }
720
721 static void perf_event__cpu_map_swap(union perf_event *event,
722                                      bool sample_id_all __maybe_unused)
723 {
724         struct cpu_map_data *data = &event->cpu_map.data;
725         struct cpu_map_entries *cpus;
726         struct cpu_map_mask *mask;
727         unsigned i;
728
729         data->type = bswap_64(data->type);
730
731         switch (data->type) {
732         case PERF_CPU_MAP__CPUS:
733                 cpus = (struct cpu_map_entries *)data->data;
734
735                 cpus->nr = bswap_16(cpus->nr);
736
737                 for (i = 0; i < cpus->nr; i++)
738                         cpus->cpu[i] = bswap_16(cpus->cpu[i]);
739                 break;
740         case PERF_CPU_MAP__MASK:
741                 mask = (struct cpu_map_mask *) data->data;
742
743                 mask->nr = bswap_16(mask->nr);
744                 mask->long_size = bswap_16(mask->long_size);
745
746                 switch (mask->long_size) {
747                 case 4: mem_bswap_32(&mask->mask, mask->nr); break;
748                 case 8: mem_bswap_64(&mask->mask, mask->nr); break;
749                 default:
750                         pr_err("cpu_map swap: unsupported long size\n");
751                 }
752         default:
753                 break;
754         }
755 }
756
757 static void perf_event__stat_config_swap(union perf_event *event,
758                                          bool sample_id_all __maybe_unused)
759 {
760         u64 size;
761
762         size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
763         size += 1; /* nr item itself */
764         mem_bswap_64(&event->stat_config.nr, size);
765 }
766
767 static void perf_event__stat_swap(union perf_event *event,
768                                   bool sample_id_all __maybe_unused)
769 {
770         event->stat.id     = bswap_64(event->stat.id);
771         event->stat.thread = bswap_32(event->stat.thread);
772         event->stat.cpu    = bswap_32(event->stat.cpu);
773         event->stat.val    = bswap_64(event->stat.val);
774         event->stat.ena    = bswap_64(event->stat.ena);
775         event->stat.run    = bswap_64(event->stat.run);
776 }
777
778 static void perf_event__stat_round_swap(union perf_event *event,
779                                         bool sample_id_all __maybe_unused)
780 {
781         event->stat_round.type = bswap_64(event->stat_round.type);
782         event->stat_round.time = bswap_64(event->stat_round.time);
783 }
784
785 typedef void (*perf_event__swap_op)(union perf_event *event,
786                                     bool sample_id_all);
787
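/* Byte-swap handlers indexed by PERF_RECORD_* type. */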
788 static perf_event__swap_op perf_event__swap_ops[] = {
789         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
790         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
791         [PERF_RECORD_COMM]                = perf_event__comm_swap,
792         [PERF_RECORD_FORK]                = perf_event__task_swap,
793         [PERF_RECORD_EXIT]                = perf_event__task_swap,
794         [PERF_RECORD_LOST]                = perf_event__all64_swap,
795         [PERF_RECORD_READ]                = perf_event__read_swap,
796         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
797         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
798         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
799         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
800         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
801         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
802         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
803         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
804         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
805         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
806         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
807         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
808         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
809         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
810         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
811         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
812         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
813         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
814         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
815         [PERF_RECORD_STAT]                = perf_event__stat_swap,
816         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
817         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
818         [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
819         [PERF_RECORD_HEADER_MAX]          = NULL,
820 };
821
822 /*
823  * When perf record finishes a pass over every buffer, it records this
824  * pseudo event.
825  * We record the max timestamp t found in pass n.
826  * Assuming these timestamps are monotonic across cpus, we know that if
827  * a buffer still has events with timestamps below t, they will all be
828  * available and read in pass n + 1.
829  * Hence when we start to read pass n + 2, we can safely flush all
830  * events with timestamps below t.
831  *
832  *    ============ PASS n =================
833  *       CPU 0         |   CPU 1
834  *                     |
835  *    cnt1 timestamps  |   cnt2 timestamps
836  *          1          |         2
837  *          2          |         3
838  *          -          |         4  <--- max recorded
839  *
840  *    ============ PASS n + 1 ==============
841  *       CPU 0         |   CPU 1
842  *                     |
843  *    cnt1 timestamps  |   cnt2 timestamps
844  *          3          |         5
845  *          4          |         6
846  *          5          |         7 <---- max recorded
847  *
848  *      Flush all events below timestamp 4
849  *
850  *    ============ PASS n + 2 ==============
851  *       CPU 0         |   CPU 1
852  *                     |
853  *    cnt1 timestamps  |   cnt2 timestamps
854  *          6          |         8
855  *          7          |         9
856  *          -          |         10
857  *
858  *      Flush all events below timestamp 7
859  *      etc...
860  */
861 static int process_finished_round(struct perf_tool *tool __maybe_unused,
862                                   union perf_event *event __maybe_unused,
863                                   struct ordered_events *oe)
864 {
865         if (dump_trace)
866                 fprintf(stdout, "\n");
867         return ordered_events__flush(oe, OE_FLUSH__ROUND);
868 }
869
870 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
871                               struct perf_sample *sample, u64 file_offset)
872 {
873         return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
874 }
875
876 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
877 {
878         struct ip_callchain *callchain = sample->callchain;
879         struct branch_stack *lbr_stack = sample->branch_stack;
880         u64 kernel_callchain_nr = callchain->nr;
881         unsigned int i;
882
883         for (i = 0; i < kernel_callchain_nr; i++) {
884                 if (callchain->ips[i] == PERF_CONTEXT_USER)
885                         break;
886         }
887
888         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
889                 u64 total_nr;
890                 /*
891                  * The LBR callstack only covers the user call chain:
892                  * i is the number of kernel call chain entries and
893                  * 1 accounts for the PERF_CONTEXT_USER marker.
894                  *
895                  * The user call chain is stored in the LBR registers,
896                  * which come in pairs: the caller is stored in the
897                  * "from" register while the callee is stored in the
898                  * "to" register.
899                  * For example, for a call stack
900                  * "A"->"B"->"C"->"D",
901                  * the LBR registers will record
902                  * "C"->"D", "B"->"C", "A"->"B".
903                  * So only the first "to" register and all "from"
904                  * registers are needed to construct the whole stack.
905                  */
906                 total_nr = i + 1 + lbr_stack->nr + 1;
907                 kernel_callchain_nr = i + 1;
908
909                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
910
911                 for (i = 0; i < kernel_callchain_nr; i++)
912                         printf("..... %2d: %016" PRIx64 "\n",
913                                i, callchain->ips[i]);
914
915                 printf("..... %2d: %016" PRIx64 "\n",
916                        (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
917                 for (i = 0; i < lbr_stack->nr; i++)
918                         printf("..... %2d: %016" PRIx64 "\n",
919                                (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
920         }
921 }
922
923 static void callchain__printf(struct perf_evsel *evsel,
924                               struct perf_sample *sample)
925 {
926         unsigned int i;
927         struct ip_callchain *callchain = sample->callchain;
928
929         if (perf_evsel__has_branch_callstack(evsel))
930                 callchain__lbr_callstack_printf(sample);
931
932         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
933
934         for (i = 0; i < callchain->nr; i++)
935                 printf("..... %2d: %016" PRIx64 "\n",
936                        i, callchain->ips[i]);
937 }
938
939 static void branch_stack__printf(struct perf_sample *sample)
940 {
941         uint64_t i;
942
943         printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
944
945         for (i = 0; i < sample->branch_stack->nr; i++) {
946                 struct branch_entry *e = &sample->branch_stack->entries[i];
947
948                 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
949                         i, e->from, e->to,
950                         (unsigned short)e->flags.cycles,
951                         e->flags.mispred ? "M" : " ",
952                         e->flags.predicted ? "P" : " ",
953                         e->flags.abort ? "A" : " ",
954                         e->flags.in_tx ? "T" : " ",
955                         (unsigned)e->flags.reserved);
956         }
957 }
958
959 static void regs_dump__printf(u64 mask, u64 *regs)
960 {
961         unsigned rid, i = 0;
962
963         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
964                 u64 val = regs[i++];
965
966                 printf(".... %-5s 0x%" PRIx64 "\n",
967                        perf_reg_name(rid), val);
968         }
969 }
970
971 static const char *regs_abi[] = {
972         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
973         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
974         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
975 };
976
977 static inline const char *regs_dump_abi(struct regs_dump *d)
978 {
979         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
980                 return "unknown";
981
982         return regs_abi[d->abi];
983 }
984
985 static void regs__printf(const char *type, struct regs_dump *regs)
986 {
987         u64 mask = regs->mask;
988
989         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
990                type,
991                mask,
992                regs_dump_abi(regs));
993
994         regs_dump__printf(mask, regs->regs);
995 }
996
997 static void regs_user__printf(struct perf_sample *sample)
998 {
999         struct regs_dump *user_regs = &sample->user_regs;
1000
1001         if (user_regs->regs)
1002                 regs__printf("user", user_regs);
1003 }
1004
1005 static void regs_intr__printf(struct perf_sample *sample)
1006 {
1007         struct regs_dump *intr_regs = &sample->intr_regs;
1008
1009         if (intr_regs->regs)
1010                 regs__printf("intr", intr_regs);
1011 }
1012
1013 static void stack_user__printf(struct stack_dump *dump)
1014 {
1015         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1016                dump->size, dump->offset);
1017 }
1018
1019 static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
1020                                        union perf_event *event,
1021                                        struct perf_sample *sample)
1022 {
1023         u64 sample_type = __perf_evlist__combined_sample_type(evlist);
1024
1025         if (event->header.type != PERF_RECORD_SAMPLE &&
1026             !perf_evlist__sample_id_all(evlist)) {
1027                 fputs("-1 -1 ", stdout);
1028                 return;
1029         }
1030
1031         if ((sample_type & PERF_SAMPLE_CPU))
1032                 printf("%u ", sample->cpu);
1033
1034         if (sample_type & PERF_SAMPLE_TIME)
1035                 printf("%" PRIu64 " ", sample->time);
1036 }
1037
1038 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1039 {
1040         printf("... sample_read:\n");
1041
1042         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1043                 printf("...... time enabled %016" PRIx64 "\n",
1044                        sample->read.time_enabled);
1045
1046         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1047                 printf("...... time running %016" PRIx64 "\n",
1048                        sample->read.time_running);
1049
1050         if (read_format & PERF_FORMAT_GROUP) {
1051                 u64 i;
1052
1053                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1054
1055                 for (i = 0; i < sample->read.group.nr; i++) {
1056                         struct sample_read_value *value;
1057
1058                         value = &sample->read.group.values[i];
1059                         printf("..... id %016" PRIx64
1060                                ", value %016" PRIx64 "\n",
1061                                value->id, value->value);
1062                 }
1063         } else
1064                 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1065                         sample->read.one.id, sample->read.one.value);
1066 }
1067
1068 static void dump_event(struct perf_evlist *evlist, union perf_event *event,
1069                        u64 file_offset, struct perf_sample *sample)
1070 {
1071         if (!dump_trace)
1072                 return;
1073
1074         printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1075                file_offset, event->header.size, event->header.type);
1076
1077         trace_event(event);
1078
1079         if (sample)
1080                 perf_evlist__print_tstamp(evlist, event, sample);
1081
1082         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1083                event->header.size, perf_event__name(event->header.type));
1084 }
1085
1086 static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
1087                         struct perf_sample *sample)
1088 {
1089         u64 sample_type;
1090
1091         if (!dump_trace)
1092                 return;
1093
1094         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1095                event->header.misc, sample->pid, sample->tid, sample->ip,
1096                sample->period, sample->addr);
1097
1098         sample_type = evsel->attr.sample_type;
1099
1100         if (sample_type & PERF_SAMPLE_CALLCHAIN)
1101                 callchain__printf(evsel, sample);
1102
1103         if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
1104                 branch_stack__printf(sample);
1105
1106         if (sample_type & PERF_SAMPLE_REGS_USER)
1107                 regs_user__printf(sample);
1108
1109         if (sample_type & PERF_SAMPLE_REGS_INTR)
1110                 regs_intr__printf(sample);
1111
1112         if (sample_type & PERF_SAMPLE_STACK_USER)
1113                 stack_user__printf(&sample->user_stack);
1114
1115         if (sample_type & PERF_SAMPLE_WEIGHT)
1116                 printf("... weight: %" PRIu64 "\n", sample->weight);
1117
1118         if (sample_type & PERF_SAMPLE_DATA_SRC)
1119                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1120
1121         if (sample_type & PERF_SAMPLE_TRANSACTION)
1122                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1123
1124         if (sample_type & PERF_SAMPLE_READ)
1125                 sample_read__printf(sample, evsel->attr.read_format);
1126 }
1127
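/*
 * Guest samples are routed to the machine matching the (guest) pid, falling
 * back to the default guest machine; everything else goes to the host.
 */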
1128 static struct machine *machines__find_for_cpumode(struct machines *machines,
1129                                                union perf_event *event,
1130                                                struct perf_sample *sample)
1131 {
1132         struct machine *machine;
1133
1134         if (perf_guest &&
1135             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1136              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1137                 u32 pid;
1138
1139                 if (event->header.type == PERF_RECORD_MMAP
1140                     || event->header.type == PERF_RECORD_MMAP2)
1141                         pid = event->mmap.pid;
1142                 else
1143                         pid = sample->pid;
1144
1145                 machine = machines__find(machines, pid);
1146                 if (!machine)
1147                         machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1148                 return machine;
1149         }
1150
1151         return &machines->host;
1152 }
1153
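/*
 * Deliver one PERF_SAMPLE_READ value: look up the evsel by id, turn the
 * running counter value into a period delta and invoke the sample callback.
 */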
1154 static int deliver_sample_value(struct perf_evlist *evlist,
1155                                 struct perf_tool *tool,
1156                                 union perf_event *event,
1157                                 struct perf_sample *sample,
1158                                 struct sample_read_value *v,
1159                                 struct machine *machine)
1160 {
1161         struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1162
1163         if (sid) {
1164                 sample->id     = v->id;
1165                 sample->period = v->value - sid->period;
1166                 sid->period    = v->value;
1167         }
1168
1169         if (!sid || sid->evsel == NULL) {
1170                 ++evlist->stats.nr_unknown_id;
1171                 return 0;
1172         }
1173
1174         return tool->sample(tool, event, sample, sid->evsel, machine);
1175 }
1176
1177 static int deliver_sample_group(struct perf_evlist *evlist,
1178                                 struct perf_tool *tool,
1179                                 union  perf_event *event,
1180                                 struct perf_sample *sample,
1181                                 struct machine *machine)
1182 {
1183         int ret = -EINVAL;
1184         u64 i;
1185
1186         for (i = 0; i < sample->read.group.nr; i++) {
1187                 ret = deliver_sample_value(evlist, tool, event, sample,
1188                                            &sample->read.group.values[i],
1189                                            machine);
1190                 if (ret)
1191                         break;
1192         }
1193
1194         return ret;
1195 }
1196
1197 static int
1198  perf_evlist__deliver_sample(struct perf_evlist *evlist,
1199                              struct perf_tool *tool,
1200                              union  perf_event *event,
1201                              struct perf_sample *sample,
1202                              struct perf_evsel *evsel,
1203                              struct machine *machine)
1204 {
1205         /* We know evsel != NULL. */
1206         u64 sample_type = evsel->attr.sample_type;
1207         u64 read_format = evsel->attr.read_format;
1208
1209         /* Standard sample delivery. */
1210         if (!(sample_type & PERF_SAMPLE_READ))
1211                 return tool->sample(tool, event, sample, evsel, machine);
1212
1213         /* For PERF_SAMPLE_READ we have either single or group mode. */
1214         if (read_format & PERF_FORMAT_GROUP)
1215                 return deliver_sample_group(evlist, tool, event, sample,
1216                                             machine);
1217         else
1218                 return deliver_sample_value(evlist, tool, event, sample,
1219                                             &sample->read.one, machine);
1220 }
1221
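/*
 * Dispatch an already-parsed event to the matching tool callback, updating
 * the evlist statistics (lost, aux, unknown ids, ...) along the way.
 */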
1222 static int machines__deliver_event(struct machines *machines,
1223                                    struct perf_evlist *evlist,
1224                                    union perf_event *event,
1225                                    struct perf_sample *sample,
1226                                    struct perf_tool *tool, u64 file_offset)
1227 {
1228         struct perf_evsel *evsel;
1229         struct machine *machine;
1230
1231         dump_event(evlist, event, file_offset, sample);
1232
1233         evsel = perf_evlist__id2evsel(evlist, sample->id);
1234
1235         machine = machines__find_for_cpumode(machines, event, sample);
1236
1237         switch (event->header.type) {
1238         case PERF_RECORD_SAMPLE:
1239                 if (evsel == NULL) {
1240                         ++evlist->stats.nr_unknown_id;
1241                         return 0;
1242                 }
1243                 dump_sample(evsel, event, sample);
1244                 if (machine == NULL) {
1245                         ++evlist->stats.nr_unprocessable_samples;
1246                         return 0;
1247                 }
1248                 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1249         case PERF_RECORD_MMAP:
1250                 return tool->mmap(tool, event, sample, machine);
1251         case PERF_RECORD_MMAP2:
1252                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1253                         ++evlist->stats.nr_proc_map_timeout;
1254                 return tool->mmap2(tool, event, sample, machine);
1255         case PERF_RECORD_COMM:
1256                 return tool->comm(tool, event, sample, machine);
1257         case PERF_RECORD_NAMESPACES:
1258                 return tool->namespaces(tool, event, sample, machine);
1259         case PERF_RECORD_FORK:
1260                 return tool->fork(tool, event, sample, machine);
1261         case PERF_RECORD_EXIT:
1262                 return tool->exit(tool, event, sample, machine);
1263         case PERF_RECORD_LOST:
1264                 if (tool->lost == perf_event__process_lost)
1265                         evlist->stats.total_lost += event->lost.lost;
1266                 return tool->lost(tool, event, sample, machine);
1267         case PERF_RECORD_LOST_SAMPLES:
1268                 if (tool->lost_samples == perf_event__process_lost_samples)
1269                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1270                 return tool->lost_samples(tool, event, sample, machine);
1271         case PERF_RECORD_READ:
1272                 return tool->read(tool, event, sample, evsel, machine);
1273         case PERF_RECORD_THROTTLE:
1274                 return tool->throttle(tool, event, sample, machine);
1275         case PERF_RECORD_UNTHROTTLE:
1276                 return tool->unthrottle(tool, event, sample, machine);
1277         case PERF_RECORD_AUX:
1278                 if (tool->aux == perf_event__process_aux) {
1279                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1280                                 evlist->stats.total_aux_lost += 1;
1281                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1282                                 evlist->stats.total_aux_partial += 1;
1283                 }
1284                 return tool->aux(tool, event, sample, machine);
1285         case PERF_RECORD_ITRACE_START:
1286                 return tool->itrace_start(tool, event, sample, machine);
1287         case PERF_RECORD_SWITCH:
1288         case PERF_RECORD_SWITCH_CPU_WIDE:
1289                 return tool->context_switch(tool, event, sample, machine);
1290         default:
1291                 ++evlist->stats.nr_unknown_events;
1292                 return -1;
1293         }
1294 }
1295
1296 static int perf_session__deliver_event(struct perf_session *session,
1297                                        union perf_event *event,
1298                                        struct perf_sample *sample,
1299                                        struct perf_tool *tool,
1300                                        u64 file_offset)
1301 {
1302         int ret;
1303
1304         ret = auxtrace__process_event(session, event, sample, tool);
1305         if (ret < 0)
1306                 return ret;
1307         if (ret > 0)
1308                 return 0;
1309
1310         return machines__deliver_event(&session->machines, session->evlist,
1311                                        event, sample, tool, file_offset);
1312 }
1313
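/*
 * Synthesized/user events (PERF_RECORD_HEADER_ATTR and friends) are handled
 * immediately and never go through the ordered_events queue.
 */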
1314 static s64 perf_session__process_user_event(struct perf_session *session,
1315                                             union perf_event *event,
1316                                             u64 file_offset)
1317 {
1318         struct ordered_events *oe = &session->ordered_events;
1319         struct perf_tool *tool = session->tool;
1320         int fd = perf_data_file__fd(session->file);
1321         int err;
1322
1323         dump_event(session->evlist, event, file_offset, NULL);
1324
1325         /* These events are processed right away */
1326         switch (event->header.type) {
1327         case PERF_RECORD_HEADER_ATTR:
1328                 err = tool->attr(tool, event, &session->evlist);
1329                 if (err == 0) {
1330                         perf_session__set_id_hdr_size(session);
1331                         perf_session__set_comm_exec(session);
1332                 }
1333                 return err;
1334         case PERF_RECORD_EVENT_UPDATE:
1335                 return tool->event_update(tool, event, &session->evlist);
1336         case PERF_RECORD_HEADER_EVENT_TYPE:
1337                 /*
1338                  * Deprecated, but we need to handle it for the sake
1339                  * of old data files created in pipe mode.
1340                  */
1341                 return 0;
1342         case PERF_RECORD_HEADER_TRACING_DATA:
1343                 /* setup for reading amidst mmap */
1344                 lseek(fd, file_offset, SEEK_SET);
1345                 return tool->tracing_data(tool, event, session);
1346         case PERF_RECORD_HEADER_BUILD_ID:
1347                 return tool->build_id(tool, event, session);
1348         case PERF_RECORD_FINISHED_ROUND:
1349                 return tool->finished_round(tool, event, oe);
1350         case PERF_RECORD_ID_INDEX:
1351                 return tool->id_index(tool, event, session);
1352         case PERF_RECORD_AUXTRACE_INFO:
1353                 return tool->auxtrace_info(tool, event, session);
1354         case PERF_RECORD_AUXTRACE:
1355                 /* setup for reading amidst mmap */
1356                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1357                 return tool->auxtrace(tool, event, session);
1358         case PERF_RECORD_AUXTRACE_ERROR:
1359                 perf_session__auxtrace_error_inc(session, event);
1360                 return tool->auxtrace_error(tool, event, session);
1361         case PERF_RECORD_THREAD_MAP:
1362                 return tool->thread_map(tool, event, session);
1363         case PERF_RECORD_CPU_MAP:
1364                 return tool->cpu_map(tool, event, session);
1365         case PERF_RECORD_STAT_CONFIG:
1366                 return tool->stat_config(tool, event, session);
1367         case PERF_RECORD_STAT:
1368                 return tool->stat(tool, event, session);
1369         case PERF_RECORD_STAT_ROUND:
1370                 return tool->stat_round(tool, event, session);
1371         case PERF_RECORD_TIME_CONV:
1372                 session->time_conv = event->time_conv;
1373                 return tool->time_conv(tool, event, session);
1374         default:
1375                 return -EINVAL;
1376         }
1377 }
1378
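/*
 * Deliver an event synthesized by the tool itself rather than read from
 * the data file; the statistics are updated and the event is dispatched
 * with a file offset of 0, since it has no backing file position.
 */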
1379 int perf_session__deliver_synth_event(struct perf_session *session,
1380                                       union perf_event *event,
1381                                       struct perf_sample *sample)
1382 {
1383         struct perf_evlist *evlist = session->evlist;
1384         struct perf_tool *tool = session->tool;
1385
1386         events_stats__inc(&evlist->stats, event->header.type);
1387
1388         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1389                 return perf_session__process_user_event(session, event, 0);
1390
1391         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1392 }
1393
1394 static void event_swap(union perf_event *event, bool sample_id_all)
1395 {
1396         perf_event__swap_op swap;
1397
1398         swap = perf_event__swap_ops[event->header.type];
1399         if (swap)
1400                 swap(event, sample_id_all);
1401 }
1402
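/*
 * Read a single event at an arbitrary file offset without disturbing the
 * normal processing loop.  When the whole file is mapped in one chunk and
 * no byte swapping is needed the event is returned straight from the
 * mapping; otherwise it is read into the caller supplied buffer, which
 * must be large enough for the whole event.  Not usable on pipes, which
 * cannot seek.
 *
 * Rough usage sketch (assuming a PERF_SAMPLE_MAX_SIZE byte buffer is big
 * enough for the event being peeked at):
 *
 *        char buf[PERF_SAMPLE_MAX_SIZE];
 *        union perf_event *ev;
 *        struct perf_sample sample;
 *
 *        if (perf_session__peek_event(session, offset, buf, sizeof(buf),
 *                                     &ev, &sample) == 0)
 *                pr_debug("peeked event type %u\n", ev->header.type);
 */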
1403 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1404                              void *buf, size_t buf_sz,
1405                              union perf_event **event_ptr,
1406                              struct perf_sample *sample)
1407 {
1408         union perf_event *event;
1409         size_t hdr_sz, rest;
1410         int fd;
1411
1412         if (session->one_mmap && !session->header.needs_swap) {
1413                 event = file_offset - session->one_mmap_offset +
1414                         session->one_mmap_addr;
1415                 goto out_parse_sample;
1416         }
1417
1418         if (perf_data_file__is_pipe(session->file))
1419                 return -1;
1420
1421         fd = perf_data_file__fd(session->file);
1422         hdr_sz = sizeof(struct perf_event_header);
1423
1424         if (buf_sz < hdr_sz)
1425                 return -1;
1426
1427         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1428             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1429                 return -1;
1430
1431         event = (union perf_event *)buf;
1432
1433         if (session->header.needs_swap)
1434                 perf_event_header__bswap(&event->header);
1435
1436         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1437                 return -1;
1438
1439         rest = event->header.size - hdr_sz;
1440
1441         if (readn(fd, buf, rest) != (ssize_t)rest)
1442                 return -1;
1443
1444         if (session->header.needs_swap)
1445                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1446
1447 out_parse_sample:
1448
1449         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1450             perf_evlist__parse_sample(session->evlist, event, sample))
1451                 return -1;
1452
1453         *event_ptr = event;
1454
1455         return 0;
1456 }
1457
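/*
 * Main entry point for one on-file event: byte swap it if the data file
 * came from a host with different endianness, reject unknown types,
 * dispatch user events immediately, and for kernel events parse the
 * sample and either queue it for time ordering (when the tool asked for
 * ordered_events) or deliver it right away.  A queueing attempt that
 * returns -ETIME (typically an event without a usable timestamp) falls
 * back to immediate delivery.
 */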
1458 static s64 perf_session__process_event(struct perf_session *session,
1459                                        union perf_event *event, u64 file_offset)
1460 {
1461         struct perf_evlist *evlist = session->evlist;
1462         struct perf_tool *tool = session->tool;
1463         struct perf_sample sample;
1464         int ret;
1465
1466         if (session->header.needs_swap)
1467                 event_swap(event, perf_evlist__sample_id_all(evlist));
1468
1469         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1470                 return -EINVAL;
1471
1472         events_stats__inc(&evlist->stats, event->header.type);
1473
1474         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1475                 return perf_session__process_user_event(session, event, file_offset);
1476
1477         /*
1478          * For all kernel events we get the sample data
1479          */
1480         ret = perf_evlist__parse_sample(evlist, event, &sample);
1481         if (ret)
1482                 return ret;
1483
1484         if (tool->ordered_events) {
1485                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1486                 if (ret != -ETIME)
1487                         return ret;
1488         }
1489
1490         return perf_session__deliver_event(session, event, &sample, tool,
1491                                            file_offset);
1492 }
1493
1494 void perf_event_header__bswap(struct perf_event_header *hdr)
1495 {
1496         hdr->type = bswap_32(hdr->type);
1497         hdr->misc = bswap_16(hdr->misc);
1498         hdr->size = bswap_16(hdr->size);
1499 }
1500
1501 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1502 {
1503         return machine__findnew_thread(&session->machines.host, -1, pid);
1504 }
1505
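/*
 * Create (or look up) the pid 0 "swapper" idle thread on the host machine
 * so that samples hitting the idle task can be resolved; the reference
 * obtained from machine__findnew_thread() is dropped before returning.
 */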
1506 int perf_session__register_idle_thread(struct perf_session *session)
1507 {
1508         struct thread *thread;
1509         int err = 0;
1510
1511         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1512         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1513                 pr_err("problem inserting idle task.\n");
1514                 err = -1;
1515         }
1516
1517         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1518                 pr_err("problem inserting idle task.\n");
1519                 err = -1;
1520         }
1521
1522         /* machine__findnew_thread() got the thread, so put it */
1523         thread__put(thread);
1524         return err;
1525 }
1526
1527 static void
1528 perf_session__warn_order(const struct perf_session *session)
1529 {
1530         const struct ordered_events *oe = &session->ordered_events;
1531         struct perf_evsel *evsel;
1532         bool should_warn = true;
1533
1534         evlist__for_each_entry(session->evlist, evsel) {
1535                 if (evsel->attr.write_backward)
1536                         should_warn = false;
1537         }
1538
1539         if (!should_warn)
1540                 return;
1541         if (oe->nr_unordered_events != 0)
1542                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1543 }
1544
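/*
 * Summarize anomalies seen while processing: lost events and samples (the
 * sample loss warning only fires above a 5% drop rate), lost or partial
 * AUX data, unknown event types, samples with unknown IDs, invalid
 * callchains, unprocessable samples, out of order events and proc map
 * timeouts.
 */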
1545 static void perf_session__warn_about_errors(const struct perf_session *session)
1546 {
1547         const struct events_stats *stats = &session->evlist->stats;
1548
1549         if (session->tool->lost == perf_event__process_lost &&
1550             stats->nr_events[PERF_RECORD_LOST] != 0) {
1551                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1552                             "Check IO/CPU overload!\n\n",
1553                             stats->nr_events[0],
1554                             stats->nr_events[PERF_RECORD_LOST]);
1555         }
1556
1557         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1558                 double drop_rate;
1559
1560                 drop_rate = (double)stats->total_lost_samples /
1561                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1562                 if (drop_rate > 0.05) {
1563                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1564                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1565                                     drop_rate * 100.0);
1566                 }
1567         }
1568
1569         if (session->tool->aux == perf_event__process_aux &&
1570             stats->total_aux_lost != 0) {
1571                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1572                             stats->total_aux_lost,
1573                             stats->nr_events[PERF_RECORD_AUX]);
1574         }
1575
1576         if (session->tool->aux == perf_event__process_aux &&
1577             stats->total_aux_partial != 0) {
1578                 bool vmm_exclusive = false;
1579
1580                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1581                                        &vmm_exclusive);
1582
1583                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1584                             "Are you running a KVM guest in the background?%s\n\n",
1585                             stats->total_aux_partial,
1586                             stats->nr_events[PERF_RECORD_AUX],
1587                             vmm_exclusive ?
1588                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1589                             "will reduce the gaps to only the guest's timeslices." :
1590                             "");
1591         }
1592
1593         if (stats->nr_unknown_events != 0) {
1594                 ui__warning("Found %u unknown events!\n\n"
1595                             "Is this an older tool processing a perf.data "
1596                             "file generated by a more recent tool?\n\n"
1597                             "If that is not the case, consider "
1598                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1599                             stats->nr_unknown_events);
1600         }
1601
1602         if (stats->nr_unknown_id != 0) {
1603                 ui__warning("%u samples with id not present in the header\n",
1604                             stats->nr_unknown_id);
1605         }
1606
1607         if (stats->nr_invalid_chains != 0) {
1608                 ui__warning("Found invalid callchains!\n\n"
1609                             "%u out of %u events were discarded for this reason.\n\n"
1610                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1611                             stats->nr_invalid_chains,
1612                             stats->nr_events[PERF_RECORD_SAMPLE]);
1613         }
1614
1615         if (stats->nr_unprocessable_samples != 0) {
1616                 ui__warning("%u unprocessable samples recorded.\n"
1617                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1618                             stats->nr_unprocessable_samples);
1619         }
1620
1621         perf_session__warn_order(session);
1622
1623         events_stats__auxtrace_error_warn(stats);
1624
1625         if (stats->nr_proc_map_timeout != 0) {
1626                 ui__warning("%d map information files for pre-existing threads were\n"
1627                             "not processed; if there are samples for those addresses, they\n"
1628                             "will not be resolved. You can find out which threads these\n"
1629                             "are by running with -v and redirecting the output\n"
1630                             "to a file.\n"
1631                             "Was the time limit to process the proc map too short?\n"
1632                             "Increase it with --proc-map-timeout.\n",
1633                             stats->nr_proc_map_timeout);
1634         }
1635 }
1636
1637 static int perf_session__flush_thread_stack(struct thread *thread,
1638                                             void *p __maybe_unused)
1639 {
1640         return thread_stack__flush(thread);
1641 }
1642
1643 static int perf_session__flush_thread_stacks(struct perf_session *session)
1644 {
1645         return machines__for_each_thread(&session->machines,
1646                                          perf_session__flush_thread_stack,
1647                                          NULL);
1648 }
1649
1650 volatile int session_done;
1651
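/*
 * Pipe mode processing: the event stream cannot be mmapped or seeked, so
 * each event header is read first, the buffer is grown with realloc() if
 * the event is larger than anything seen so far, and the remaining
 * payload is then read before the event is processed.  Because the
 * buffer is reused for every event, ordered_events is told to copy
 * whatever it queues.
 */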
1652 static int __perf_session__process_pipe_events(struct perf_session *session)
1653 {
1654         struct ordered_events *oe = &session->ordered_events;
1655         struct perf_tool *tool = session->tool;
1656         int fd = perf_data_file__fd(session->file);
1657         union perf_event *event;
1658         uint32_t size, cur_size = 0;
1659         void *buf = NULL;
1660         s64 skip = 0;
1661         u64 head;
1662         ssize_t err;
1663         void *p;
1664
1665         perf_tool__fill_defaults(tool);
1666
1667         head = 0;
1668         cur_size = sizeof(union perf_event);
1669
1670         buf = malloc(cur_size);
1671         if (!buf)
1672                 return -errno;
1673         ordered_events__set_copy_on_queue(oe, true);
1674 more:
1675         event = buf;
1676         err = readn(fd, event, sizeof(struct perf_event_header));
1677         if (err <= 0) {
1678                 if (err == 0)
1679                         goto done;
1680
1681                 pr_err("failed to read event header\n");
1682                 goto out_err;
1683         }
1684
1685         if (session->header.needs_swap)
1686                 perf_event_header__bswap(&event->header);
1687
1688         size = event->header.size;
1689         if (size < sizeof(struct perf_event_header)) {
1690                 pr_err("bad event header size\n");
1691                 goto out_err;
1692         }
1693
1694         if (size > cur_size) {
1695                 void *new = realloc(buf, size);
1696                 if (!new) {
1697                         pr_err("failed to allocate memory to read event\n");
1698                         goto out_err;
1699                 }
1700                 buf = new;
1701                 cur_size = size;
1702                 event = buf;
1703         }
1704         p = event;
1705         p += sizeof(struct perf_event_header);
1706
1707         if (size - sizeof(struct perf_event_header)) {
1708                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1709                 if (err <= 0) {
1710                         if (err == 0) {
1711                                 pr_err("unexpected end of event stream\n");
1712                                 goto done;
1713                         }
1714
1715                         pr_err("failed to read event data\n");
1716                         goto out_err;
1717                 }
1718         }
1719
1720         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1721                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1722                        head, event->header.size, event->header.type);
1723                 err = -EINVAL;
1724                 goto out_err;
1725         }
1726
1727         head += size;
1728
1729         if (skip > 0)
1730                 head += skip;
1731
1732         if (!session_done())
1733                 goto more;
1734 done:
1735         /* do the final flush for ordered samples */
1736         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1737         if (err)
1738                 goto out_err;
1739         err = auxtrace__flush_events(session, tool);
1740         if (err)
1741                 goto out_err;
1742         err = perf_session__flush_thread_stacks(session);
1743 out_err:
1744         free(buf);
1745         perf_session__warn_about_errors(session);
1746         ordered_events__free(&session->ordered_events);
1747         auxtrace__free_events(session);
1748         return err;
1749 }
1750
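/*
 * Return a pointer to the event at 'head' inside the current mmap window,
 * or NULL when the event (or even just its header) crosses the end of the
 * window, in which case the caller remaps; the header is swapped back so
 * it can be swapped again on the next attempt.
 */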
1751 static union perf_event *
1752 fetch_mmaped_event(struct perf_session *session,
1753                    u64 head, size_t mmap_size, char *buf)
1754 {
1755         union perf_event *event;
1756
1757         /*
1758          * Ensure enough of the mapping remains to read the event
1759          * header, which carries the event's size.
1760          */
1761         if (head + sizeof(event->header) > mmap_size)
1762                 return NULL;
1763
1764         event = (union perf_event *)(buf + head);
1765
1766         if (session->header.needs_swap)
1767                 perf_event_header__bswap(&event->header);
1768
1769         if (head + event->header.size > mmap_size) {
1770                 /* We're not fetching the event so swap back again */
1771                 if (session->header.needs_swap)
1772                         perf_event_header__bswap(&event->header);
1773                 return NULL;
1774         }
1775
1776         return event;
1777 }
1778
1779 /*
1780  * On 64bit we can mmap the data file in one go. No need for tiny mmap
1781  * slices. On 32bit we use 32MB.
1782  */
1783 #if BITS_PER_LONG == 64
1784 #define MMAP_SIZE ULLONG_MAX
1785 #define NUM_MMAPS 1
1786 #else
1787 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1788 #define NUM_MMAPS 128
1789 #endif
1790
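/*
 * File mode processing: the data area is mapped in MMAP_SIZE windows
 * aligned to the page size and events are consumed straight from the
 * mapping.  When an event would cross the end of the current window, an
 * old mapping slot is recycled and the file is remapped starting at the
 * page that contains the partial event.  A writable private mapping is
 * used when byte swapping is needed, since events are swapped in place.
 */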
1791 static int __perf_session__process_events(struct perf_session *session,
1792                                           u64 data_offset, u64 data_size,
1793                                           u64 file_size)
1794 {
1795         struct ordered_events *oe = &session->ordered_events;
1796         struct perf_tool *tool = session->tool;
1797         int fd = perf_data_file__fd(session->file);
1798         u64 head, page_offset, file_offset, file_pos, size;
1799         int err, mmap_prot, mmap_flags, map_idx = 0;
1800         size_t  mmap_size;
1801         char *buf, *mmaps[NUM_MMAPS];
1802         union perf_event *event;
1803         struct ui_progress prog;
1804         s64 skip;
1805
1806         perf_tool__fill_defaults(tool);
1807
1808         page_offset = page_size * (data_offset / page_size);
1809         file_offset = page_offset;
1810         head = data_offset - page_offset;
1811
1812         if (data_size == 0)
1813                 goto out;
1814
1815         if (data_offset + data_size < file_size)
1816                 file_size = data_offset + data_size;
1817
1818         ui_progress__init(&prog, file_size, "Processing events...");
1819
1820         mmap_size = MMAP_SIZE;
1821         if (mmap_size > file_size) {
1822                 mmap_size = file_size;
1823                 session->one_mmap = true;
1824         }
1825
1826         memset(mmaps, 0, sizeof(mmaps));
1827
1828         mmap_prot  = PROT_READ;
1829         mmap_flags = MAP_SHARED;
1830
1831         if (session->header.needs_swap) {
1832                 mmap_prot  |= PROT_WRITE;
1833                 mmap_flags = MAP_PRIVATE;
1834         }
1835 remap:
1836         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1837                    file_offset);
1838         if (buf == MAP_FAILED) {
1839                 pr_err("failed to mmap file\n");
1840                 err = -errno;
1841                 goto out_err;
1842         }
1843         mmaps[map_idx] = buf;
1844         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1845         file_pos = file_offset + head;
1846         if (session->one_mmap) {
1847                 session->one_mmap_addr = buf;
1848                 session->one_mmap_offset = file_offset;
1849         }
1850
1851 more:
1852         event = fetch_mmaped_event(session, head, mmap_size, buf);
1853         if (!event) {
1854                 if (mmaps[map_idx]) {
1855                         munmap(mmaps[map_idx], mmap_size);
1856                         mmaps[map_idx] = NULL;
1857                 }
1858
1859                 page_offset = page_size * (head / page_size);
1860                 file_offset += page_offset;
1861                 head -= page_offset;
1862                 goto remap;
1863         }
1864
1865         size = event->header.size;
1866
1867         if (size < sizeof(struct perf_event_header) ||
1868             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1869                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1870                        file_offset + head, event->header.size,
1871                        event->header.type);
1872                 err = -EINVAL;
1873                 goto out_err;
1874         }
1875
1876         if (skip)
1877                 size += skip;
1878
1879         head += size;
1880         file_pos += size;
1881
1882         ui_progress__update(&prog, size);
1883
1884         if (session_done())
1885                 goto out;
1886
1887         if (file_pos < file_size)
1888                 goto more;
1889
1890 out:
1891         /* do the final flush for ordered samples */
1892         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1893         if (err)
1894                 goto out_err;
1895         err = auxtrace__flush_events(session, tool);
1896         if (err)
1897                 goto out_err;
1898         err = perf_session__flush_thread_stacks(session);
1899 out_err:
1900         ui_progress__finish();
1901         perf_session__warn_about_errors(session);
1902         /*
1903          * We may be switching the perf.data output; make ordered_events
1904          * reusable.
1905          */
1906         ordered_events__reinit(&session->ordered_events);
1907         auxtrace__free_events(session);
1908         session->one_mmap = false;
1909         return err;
1910 }
1911
1912 int perf_session__process_events(struct perf_session *session)
1913 {
1914         u64 size = perf_data_file__size(session->file);
1915         int err;
1916
1917         if (perf_session__register_idle_thread(session) < 0)
1918                 return -ENOMEM;
1919
1920         if (!perf_data_file__is_pipe(session->file))
1921                 err = __perf_session__process_events(session,
1922                                                      session->header.data_offset,
1923                                                      session->header.data_size, size);
1924         else
1925                 err = __perf_session__process_pipe_events(session);
1926
1927         return err;
1928 }
1929
1930 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1931 {
1932         struct perf_evsel *evsel;
1933
1934         evlist__for_each_entry(session->evlist, evsel) {
1935                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1936                         return true;
1937         }
1938
1939         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1940         return false;
1941 }
1942
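/*
 * Attach a reference relocation symbol (name and address, e.g. from
 * kallsyms) to every kernel map type; a trailing ']' in the supplied name
 * is stripped.  The recorded address can later be compared with the
 * symbol's runtime address to compensate for a relocated kernel.
 */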
1943 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1944                                      const char *symbol_name, u64 addr)
1945 {
1946         char *bracket;
1947         int i;
1948         struct ref_reloc_sym *ref;
1949
1950         ref = zalloc(sizeof(struct ref_reloc_sym));
1951         if (ref == NULL)
1952                 return -ENOMEM;
1953
1954         ref->name = strdup(symbol_name);
1955         if (ref->name == NULL) {
1956                 free(ref);
1957                 return -ENOMEM;
1958         }
1959
1960         bracket = strchr(ref->name, ']');
1961         if (bracket)
1962                 *bracket = '\0';
1963
1964         ref->addr = addr;
1965
1966         for (i = 0; i < MAP__NR_TYPES; ++i) {
1967                 struct kmap *kmap = map__kmap(maps[i]);
1968
1969                 if (!kmap)
1970                         continue;
1971                 kmap->ref_reloc_sym = ref;
1972         }
1973
1974         return 0;
1975 }
1976
1977 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1978 {
1979         return machines__fprintf_dsos(&session->machines, fp);
1980 }
1981
1982 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1983                                           bool (skip)(struct dso *dso, int parm), int parm)
1984 {
1985         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1986 }
1987
1988 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1989 {
1990         size_t ret;
1991         const char *msg = "";
1992
1993         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
1994                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
1995
1996         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
1997
1998         ret += events_stats__fprintf(&session->evlist->stats, fp);
1999         return ret;
2000 }
2001
2002 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2003 {
2004         /*
2005          * FIXME: Here we have to actually print all the machines in this
2006          * session, not just the host...
2007          */
2008         return machine__fprintf(&session->machines.host, fp);
2009 }
2010
2011 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2012                                               unsigned int type)
2013 {
2014         struct perf_evsel *pos;
2015
2016         evlist__for_each_entry(session->evlist, pos) {
2017                 if (pos->attr.type == type)
2018                         return pos;
2019         }
2020         return NULL;
2021 }
2022
2023 int perf_session__cpu_bitmap(struct perf_session *session,
2024                              const char *cpu_list, unsigned long *cpu_bitmap)
2025 {
2026         int i, err = -1;
2027         struct cpu_map *map;
2028
2029         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2030                 struct perf_evsel *evsel;
2031
2032                 evsel = perf_session__find_first_evtype(session, i);
2033                 if (!evsel)
2034                         continue;
2035
2036                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2037                         pr_err("File does not contain CPU events. "
2038                                "Remove -C option to proceed.\n");
2039                         return -1;
2040                 }
2041         }
2042
2043         map = cpu_map__new(cpu_list);
2044         if (map == NULL) {
2045                 pr_err("Invalid cpu_list\n");
2046                 return -1;
2047         }
2048
2049         for (i = 0; i < map->nr; i++) {
2050                 int cpu = map->map[i];
2051
2052                 if (cpu >= MAX_NR_CPUS) {
2053                         pr_err("Requested CPU %d too large. "
2054                                "Consider raising MAX_NR_CPUS\n", cpu);
2055                         goto out_delete_map;
2056                 }
2057
2058                 set_bit(cpu, cpu_bitmap);
2059         }
2060
2061         err = 0;
2062
2063 out_delete_map:
2064         cpu_map__put(map);
2065         return err;
2066 }
2067
2068 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2069                                 bool full)
2070 {
2071         if (session == NULL || fp == NULL)
2072                 return;
2073
2074         fprintf(fp, "# ========\n");
2075         perf_header__fprintf_info(session, fp, full);
2076         fprintf(fp, "# ========\n#\n");
2077 }
2078
2079
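/*
 * Associate handler callbacks with tracepoint events by name.  Entries in
 * 'assocs' that name events not present in the session are silently
 * skipped; an event that already has a handler makes the call fail with
 * -EEXIST.
 *
 * Typical usage (sketch only, the handler functions are hypothetical and
 * follow whatever signature the calling tool expects):
 *
 *        static const struct perf_evsel_str_handler handlers[] = {
 *                { "sched:sched_switch", process_sched_switch },
 *                { "sched:sched_wakeup", process_sched_wakeup },
 *        };
 *
 *        if (__perf_session__set_tracepoints_handlers(session, handlers,
 *                                                     ARRAY_SIZE(handlers)))
 *                pr_err("failed to set tracepoint handlers\n");
 */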
2080 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2081                                              const struct perf_evsel_str_handler *assocs,
2082                                              size_t nr_assocs)
2083 {
2084         struct perf_evsel *evsel;
2085         size_t i;
2086         int err;
2087
2088         for (i = 0; i < nr_assocs; i++) {
2089                 /*
2090                  * Adding a handler for an event not in the session,
2091                  * just ignore it.
2092                  */
2093                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2094                 if (evsel == NULL)
2095                         continue;
2096
2097                 err = -EEXIST;
2098                 if (evsel->handler != NULL)
2099                         goto out;
2100                 evsel->handler = assocs[i].handler;
2101         }
2102
2103         err = 0;
2104 out:
2105         return err;
2106 }
2107
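/*
 * Process a PERF_RECORD_ID_INDEX event: for every (id, idx, cpu, tid)
 * entry it carries, fill in the matching perf_sample_id in the evlist so
 * that the id alone is enough to recover the cpu and tid later (used, for
 * example, by AUX area trace decoders).
 */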
2108 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2109                                  union perf_event *event,
2110                                  struct perf_session *session)
2111 {
2112         struct perf_evlist *evlist = session->evlist;
2113         struct id_index_event *ie = &event->id_index;
2114         size_t i, nr, max_nr;
2115
2116         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2117                  sizeof(struct id_index_entry);
2118         nr = ie->nr;
2119         if (nr > max_nr)
2120                 return -EINVAL;
2121
2122         if (dump_trace)
2123                 fprintf(stdout, " nr: %zu\n", nr);
2124
2125         for (i = 0; i < nr; i++) {
2126                 struct id_index_entry *e = &ie->entries[i];
2127                 struct perf_sample_id *sid;
2128
2129                 if (dump_trace) {
2130                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2131                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2132                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2133                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2134                 }
2135
2136                 sid = perf_evlist__id2sid(evlist, e->id);
2137                 if (!sid)
2138                         return -ENOENT;
2139                 sid->idx = e->idx;
2140                 sid->cpu = e->cpu;
2141                 sid->tid = e->tid;
2142         }
2143         return 0;
2144 }
2145
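/*
 * Synthesize PERF_RECORD_ID_INDEX events covering every sample id of
 * every evsel in the evlist.  The 16 bit header size limits how many
 * id_index_entry structs fit in one event, so entries are emitted in
 * chunks of at most 'max_nr' and a final, possibly smaller, event carries
 * the remainder.
 */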
2146 int perf_event__synthesize_id_index(struct perf_tool *tool,
2147                                     perf_event__handler_t process,
2148                                     struct perf_evlist *evlist,
2149                                     struct machine *machine)
2150 {
2151         union perf_event *ev;
2152         struct perf_evsel *evsel;
2153         size_t nr = 0, i = 0, sz, max_nr, n;
2154         int err;
2155
2156         pr_debug2("Synthesizing id index\n");
2157
2158         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2159                  sizeof(struct id_index_entry);
2160
2161         evlist__for_each_entry(evlist, evsel)
2162                 nr += evsel->ids;
2163
2164         n = nr > max_nr ? max_nr : nr;
2165         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2166         ev = zalloc(sz);
2167         if (!ev)
2168                 return -ENOMEM;
2169
2170         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2171         ev->id_index.header.size = sz;
2172         ev->id_index.nr = n;
2173
2174         evlist__for_each_entry(evlist, evsel) {
2175                 u32 j;
2176
2177                 for (j = 0; j < evsel->ids; j++) {
2178                         struct id_index_entry *e;
2179                         struct perf_sample_id *sid;
2180
2181                         if (i >= n) {
2182                                 err = process(tool, ev, NULL, machine);
2183                                 if (err)
2184                                         goto out_err;
2185                                 nr -= n;
2186                                 i = 0;
2187                         }
2188
2189                         e = &ev->id_index.entries[i++];
2190
2191                         e->id = evsel->id[j];
2192
2193                         sid = perf_evlist__id2sid(evlist, e->id);
2194                         if (!sid) {
2195                                 free(ev);
2196                                 return -ENOENT;
2197                         }
2198
2199                         e->idx = sid->idx;
2200                         e->cpu = sid->cpu;
2201                         e->tid = sid->tid;
2202                 }
2203         }
2204
2205         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2206         ev->id_index.header.size = sz;
2207         ev->id_index.nr = nr;
2208
2209         err = process(tool, ev, NULL, machine);
2210 out_err:
2211         free(ev);
2212
2213         return err;
2214 }