#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
        struct perf_data_file *file = session->file;

        if (perf_session__read_header(session) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data_file__is_pipe(file))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!perf_evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!perf_evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!perf_evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_sample sample;
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);
        int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

        if (ret) {
                pr_err("Can't parse sample, err = %d\n", ret);
                return ret;
        }

        return perf_session__deliver_event(session, event->event, &sample,
                                           session->tool, event->file_offset);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
                                       bool repipe, struct perf_tool *tool)
{
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

        if (file) {
                if (perf_data_file__open(file))
                        goto out_delete;

                session->file = file;

                if (perf_data_file__is_read(file)) {
                        if (perf_session__open(session) < 0)
                                goto out_close;

                        /*
                         * Set session attributes that are present in perf.data
                         * but not in pipe mode.
                         */
                        if (!file->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        if (!file || perf_data_file__is_write(file)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so perf_evlist__sample_id_all is not meaningful here.
         */
        if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_close:
        perf_data_file__close(file);
 out_delete:
        perf_session__delete(session);
 out:
        return NULL;
}
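
/*
 * Example usage (a sketch, not part of this file): a reader tool typically
 * pairs perf_session__new() with perf_session__delete(), assuming the
 * perf_data_file fields of this tree:
 *
 *	struct perf_data_file file = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session == NULL)
 *		return -1;
 *	...
 *	perf_session__delete(session);
 */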

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event,
                                       struct perf_session *session)
{
        dump_printf(": unhandled!\n");
        if (perf_data_file__is_pipe(session->file))
                skipn(perf_data_file__fd(session->file), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_tool *tool __maybe_unused,
                               union perf_event *event __maybe_unused,
                               struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *session __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_tool *tool __maybe_unused,
                             union perf_event *event __maybe_unused,
                             struct perf_session *perf_session
                             __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused,
                                   struct perf_session *perf_session
                                   __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_op2_stub;
}
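
/*
 * Example (a sketch, not from this file): a minimal tool only sets the
 * callbacks it cares about and lets perf_tool__fill_defaults() supply
 * stubs for the rest. "my_process_sample" is a hypothetical callback.
 *
 *	struct perf_tool tool = {
 *		.sample         = my_process_sample,
 *		.ordered_events = true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 */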

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
        event->mmap2.maj   = bswap_32(event->mmap2.maj);
        event->mmap2.min   = bswap_32(event->mmap2.min);
        event->mmap2.ino   = bswap_64(event->mmap2.ino);

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}
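
/*
 * Needed by the PERF_RECORD_NAMESPACES case in machines__deliver_event()
 * below; a best-effort reconstruction after the upstream helper of the
 * same name, not verbatim from this file.
 */
static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}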

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}
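
/*
 * For example, revbyte() maps 0x01 (bit 0 set) to 0x80 (bit 7 set):
 * it swaps the nibbles, then the bit pairs, then the neighbouring bits.
 */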

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)
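
        /*
         * For example, bswap_field_64(config) expands (modulo the do/while
         * wrapper) to:
         *
         *      if (attr->size > offsetof(struct perf_event_attr, config) +
         *                       sizeof(attr->config) * 0)
         *              attr->config = bswap_64(attr->config);
         *
         * i.e. a field is swapped only when the on-file attr is large
         * enough to actually contain it.
         */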

        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);

        /*
         * The bitfields come right after read_format. Check read_format
         * here because offsetof() cannot be applied to a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field_16
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct cpu_map_data *data = &event->cpu_map.data;
        struct cpu_map_entries *cpus;
        struct cpu_map_mask *mask;
        unsigned i;

        data->type = bswap_64(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                cpus = (struct cpu_map_entries *)data->data;

                cpus->nr = bswap_16(cpus->nr);

                for (i = 0; i < cpus->nr; i++)
                        cpus->cpu[i] = bswap_16(cpus->cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                mask = (struct cpu_map_mask *)data->data;

                mask->nr = bswap_16(mask->nr);
                mask->long_size = bswap_16(mask->long_size);

                switch (mask->long_size) {
                case 4: mem_bswap_32(&mask->mask, mask->nr); break;
                case 8: mem_bswap_64(&mask->mask, mask->nr); break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event __maybe_unused,
                                  struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              struct perf_sample *sample, u64 file_offset)
{
        return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR callstack only captures the user call chain;
                 * i is the number of kernel call chain entries, and the
                 * extra 1 accounts for PERF_CONTEXT_USER.
                 *
                 * The user call chain is stored in LBR registers.
                 * LBRs are register pairs: the caller is stored in the
                 * "from" register, while the callee is stored in the
                 * "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will record
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all the "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
        }
}

static void callchain__printf(struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (perf_evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &sample->branch_stack->entries[i];

                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
                        i, e->from, e->to,
                        (unsigned short)e->flags.cycles,
                        e->flags.mispred ? "M" : " ",
                        e->flags.predicted ? "P" : " ",
                        e->flags.abort ? "A" : " ",
                        e->flags.in_tx ? "T" : " ",
                        (unsigned)e->flags.reserved);
        }
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}
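
/*
 * Note that regs[] is packed: with mask 0x30, for instance, the values of
 * registers 4 and 5 are read from regs[0] and regs[1] respectively.
 */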

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
                                      union perf_event *event,
                                      struct perf_sample *sample)
{
        u64 sample_type = __perf_evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                u64 i;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                for (i = 0; i < sample->read.group.nr; i++) {
                        struct sample_read_value *value;

                        value = &sample->read.group.values[i];
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64 "\n",
                               value->id, value->value);
                }
        } else
                printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
                        sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_evlist__print_tstamp(evlist, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(evsel, sample);

        if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_INTR)
                regs_intr__printf(sample);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

        if (sample_type & PERF_SAMPLE_TRANSACTION)
                printf("... transaction: %" PRIx64 "\n", sample->transaction);

        if (sample_type & PERF_SAMPLE_READ)
                sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
                                                  union perf_event *event,
                                                  struct perf_sample *sample)
{
        struct machine *machine;

        if (perf_guest &&
            ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP
                    || event->header.type == PERF_RECORD_MMAP2)
                        pid = event->mmap.pid;
                else
                        pid = sample->pid;

                machine = machines__find(machines, pid);
                if (!machine)
                        machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }

        return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct sample_read_value *v,
                                struct machine *machine)
{
        struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

        if (sid) {
                /*
                 * Read values are running counts, so derive this sample's
                 * period as the delta against the last value seen for the
                 * same id.
                 */
                sample->id     = v->id;
                sample->period = v->value - sid->period;
                sid->period    = v->value;
        }

        if (!sid || sid->evsel == NULL) {
                ++evlist->stats.nr_unknown_id;
                return 0;
        }

        return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
                                struct perf_tool *tool,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        int ret = -EINVAL;
        u64 i;

        for (i = 0; i < sample->read.group.nr; i++) {
                ret = deliver_sample_value(evlist, tool, event, sample,
                                           &sample->read.group.values[i],
                                           machine);
                if (ret)
                        break;
        }

        return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
                            struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct perf_evsel *evsel,
                            struct machine *machine)
{
        /* We know evsel != NULL. */
        u64 sample_type = evsel->attr.sample_type;
        u64 read_format = evsel->attr.read_format;

        /* Standard sample delivery. */
        if (!(sample_type & PERF_SAMPLE_READ))
                return tool->sample(tool, event, sample, evsel, machine);

        /* For PERF_SAMPLE_READ we have either single or group mode. */
        if (read_format & PERF_FORMAT_GROUP)
                return deliver_sample_group(evlist, tool, event, sample,
                                            machine);
        else
                return deliver_sample_value(evlist, tool, event, sample,
                                            &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
                                   struct perf_evlist *evlist,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct perf_tool *tool, u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(evlist, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(evlist, sample->id);

        machine = machines__find_for_cpumode(machines, event, sample);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                if (evsel == NULL) {
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
                dump_sample(evsel, event, sample);
                if (machine == NULL) {
                        ++evlist->stats.nr_unprocessable_samples;
                        return 0;
                }
                return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
                if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
                        ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_NAMESPACES:
                return tool->namespaces(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_LOST_SAMPLES:
                if (tool->lost_samples == perf_event__process_lost_samples)
                        evlist->stats.total_lost_samples += event->lost_samples.lost;
                return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        case PERF_RECORD_AUX:
                if (tool->aux == perf_event__process_aux) {
                        if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
                                evlist->stats.total_aux_lost += 1;
                        if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
                                evlist->stats.total_aux_partial += 1;
                }
                return tool->aux(tool, event, sample, machine);
        case PERF_RECORD_ITRACE_START:
                return tool->itrace_start(tool, event, sample, machine);
        case PERF_RECORD_SWITCH:
        case PERF_RECORD_SWITCH_CPU_WIDE:
                return tool->context_switch(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        int ret;

        ret = auxtrace__process_event(session, event, sample, tool);
        if (ret < 0)
                return ret;
        if (ret > 0)    /* the event was consumed by the AUX area decoder */
                return 0;

        return machines__deliver_event(&session->machines, session->evlist,
                                       event, sample, tool, file_offset);
}
1312
1313 static s64 perf_session__process_user_event(struct perf_session *session,
1314                                             union perf_event *event,
1315                                             u64 file_offset)
1316 {
1317         struct ordered_events *oe = &session->ordered_events;
1318         struct perf_tool *tool = session->tool;
1319         int fd = perf_data_file__fd(session->file);
1320         int err;
1321
1322         dump_event(session->evlist, event, file_offset, NULL);
1323
1324         /* These events are processed right away */
1325         switch (event->header.type) {
1326         case PERF_RECORD_HEADER_ATTR:
1327                 err = tool->attr(tool, event, &session->evlist);
1328                 if (err == 0) {
1329                         perf_session__set_id_hdr_size(session);
1330                         perf_session__set_comm_exec(session);
1331                 }
1332                 return err;
1333         case PERF_RECORD_EVENT_UPDATE:
1334                 return tool->event_update(tool, event, &session->evlist);
1335         case PERF_RECORD_HEADER_EVENT_TYPE:
1336                 /*
1337                  * Depreceated, but we need to handle it for sake
1338                  * of old data files create in pipe mode.
1339                  */
1340                 return 0;
1341         case PERF_RECORD_HEADER_TRACING_DATA:
1342                 /* setup for reading amidst mmap */
1343                 lseek(fd, file_offset, SEEK_SET);
1344                 return tool->tracing_data(tool, event, session);
1345         case PERF_RECORD_HEADER_BUILD_ID:
1346                 return tool->build_id(tool, event, session);
1347         case PERF_RECORD_FINISHED_ROUND:
1348                 return tool->finished_round(tool, event, oe);
1349         case PERF_RECORD_ID_INDEX:
1350                 return tool->id_index(tool, event, session);
1351         case PERF_RECORD_AUXTRACE_INFO:
1352                 return tool->auxtrace_info(tool, event, session);
1353         case PERF_RECORD_AUXTRACE:
1354                 /* setup for reading amidst mmap */
1355                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1356                 return tool->auxtrace(tool, event, session);
1357         case PERF_RECORD_AUXTRACE_ERROR:
1358                 perf_session__auxtrace_error_inc(session, event);
1359                 return tool->auxtrace_error(tool, event, session);
1360         case PERF_RECORD_THREAD_MAP:
1361                 return tool->thread_map(tool, event, session);
1362         case PERF_RECORD_CPU_MAP:
1363                 return tool->cpu_map(tool, event, session);
1364         case PERF_RECORD_STAT_CONFIG:
1365                 return tool->stat_config(tool, event, session);
1366         case PERF_RECORD_STAT:
1367                 return tool->stat(tool, event, session);
1368         case PERF_RECORD_STAT_ROUND:
1369                 return tool->stat_round(tool, event, session);
1370         case PERF_RECORD_TIME_CONV:
1371                 session->time_conv = event->time_conv;
1372                 return tool->time_conv(tool, event, session);
1373         default:
1374                 return -EINVAL;
1375         }
1376 }
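/*
 * Hedged sketch of how a consumer hooks the user-event callbacks
 * dispatched above: a tool fills in only the handlers it cares about,
 * and perf_tool__fill_defaults() (used by the processing loops below)
 * supplies stubs for the rest. The handler body here is a hypothetical
 * example; OE_FLUSH__ROUND is what the default finished_round handler
 * uses.
 *
 *	static int my_finished_round(struct perf_tool *tool,
 *				     union perf_event *event,
 *				     struct ordered_events *oe)
 *	{
 *		return ordered_events__flush(oe, OE_FLUSH__ROUND);
 *	}
 *
 *	struct perf_tool tool = {
 *		.finished_round	= my_finished_round,
 *		.ordered_events	= true,
 *	};
 */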
1377
1378 int perf_session__deliver_synth_event(struct perf_session *session,
1379                                       union perf_event *event,
1380                                       struct perf_sample *sample)
1381 {
1382         struct perf_evlist *evlist = session->evlist;
1383         struct perf_tool *tool = session->tool;
1384
1385         events_stats__inc(&evlist->stats, event->header.type);
1386
1387         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1388                 return perf_session__process_user_event(session, event, 0);
1389
1390         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1391 }
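/*
 * Hedged usage sketch (hypothetical caller, error handling trimmed):
 * inject an event built in memory without going through the data file.
 * A zeroed sample is passed because some downstream handlers
 * dereference it (e.g. for sample->time); 'pid' names a hypothetical
 * target task.
 *
 *	union perf_event event = {
 *		.comm = {
 *			.header = {
 *				.type = PERF_RECORD_COMM,
 *				.size = sizeof(event.comm),
 *			},
 *			.pid = pid,
 *			.tid = pid,
 *		},
 *	};
 *	struct perf_sample sample = { .time = 0 };
 *
 *	strncpy(event.comm.comm, "worker", sizeof(event.comm.comm) - 1);
 *	perf_session__deliver_synth_event(session, &event, &sample);
 */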
1392
1393 static void event_swap(union perf_event *event, bool sample_id_all)
1394 {
1395         perf_event__swap_op swap;
1396
1397         swap = perf_event__swap_ops[event->header.type];
1398         if (swap)
1399                 swap(event, sample_id_all);
1400 }
1401
1402 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1403                              void *buf, size_t buf_sz,
1404                              union perf_event **event_ptr,
1405                              struct perf_sample *sample)
1406 {
1407         union perf_event *event;
1408         size_t hdr_sz, rest;
1409         int fd;
1410
1411         if (session->one_mmap && !session->header.needs_swap) {
1412                 event = file_offset - session->one_mmap_offset +
1413                         session->one_mmap_addr;
1414                 goto out_parse_sample;
1415         }
1416
1417         if (perf_data_file__is_pipe(session->file))
1418                 return -1;
1419
1420         fd = perf_data_file__fd(session->file);
1421         hdr_sz = sizeof(struct perf_event_header);
1422
1423         if (buf_sz < hdr_sz)
1424                 return -1;
1425
1426         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1427             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1428                 return -1;
1429
1430         event = (union perf_event *)buf;
1431
1432         if (session->header.needs_swap)
1433                 perf_event_header__bswap(&event->header);
1434
1435         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1436                 return -1;
1437
1438         rest = event->header.size - hdr_sz;
1439
1440         if (readn(fd, buf + hdr_sz, rest) != (ssize_t)rest)
1441                 return -1;
1442
1443         if (session->header.needs_swap)
1444                 event_swap(event, perf_evlist__sample_id_all(session->evlist));
1445
1446 out_parse_sample:
1447
1448         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1449             perf_evlist__parse_sample(session->evlist, event, sample))
1450                 return -1;
1451
1452         *event_ptr = event;
1453
1454         return 0;
1455 }
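/*
 * Minimal usage sketch (hypothetical caller): peek at the event stored
 * at a known file offset without disturbing the normal processing
 * position. PERF_SAMPLE_MAX_SIZE bounds the largest event we expect.
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	if (perf_session__peek_event(session, offset, buf, sizeof(buf),
 *				     &event, &sample) == 0)
 *		pr_debug("peeked event type %u\n", event->header.type);
 */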
1456
1457 static s64 perf_session__process_event(struct perf_session *session,
1458                                        union perf_event *event, u64 file_offset)
1459 {
1460         struct perf_evlist *evlist = session->evlist;
1461         struct perf_tool *tool = session->tool;
1462         struct perf_sample sample;
1463         int ret;
1464
1465         if (session->header.needs_swap)
1466                 event_swap(event, perf_evlist__sample_id_all(evlist));
1467
1468         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1469                 return -EINVAL;
1470
1471         events_stats__inc(&evlist->stats, event->header.type);
1472
1473         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1474                 return perf_session__process_user_event(session, event, file_offset);
1475
1476         /*
1477          * For all kernel events we get the sample data
1478          */
1479         ret = perf_evlist__parse_sample(evlist, event, &sample);
1480         if (ret)
1481                 return ret;
1482
1483         if (tool->ordered_events) {
1484                 ret = perf_session__queue_event(session, event, &sample, file_offset);
1485                 if (ret != -ETIME)
1486                         return ret;
1487         }
1488
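        /*
         * perf_session__queue_event() returns -ETIME for events that
         * carry no usable timestamp; such events cannot be ordered,
         * so they fall through to direct delivery below.
         */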
1489         return perf_session__deliver_event(session, event, &sample, tool,
1490                                            file_offset);
1491 }
1492
1493 void perf_event_header__bswap(struct perf_event_header *hdr)
1494 {
1495         hdr->type = bswap_32(hdr->type);
1496         hdr->misc = bswap_16(hdr->misc);
1497         hdr->size = bswap_16(hdr->size);
1498 }
1499
1500 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1501 {
1502         return machine__findnew_thread(&session->machines.host, -1, pid);
1503 }
1504
1505 int perf_session__register_idle_thread(struct perf_session *session)
1506 {
1507         struct thread *thread;
1508         int err = 0;
1509
1510         thread = machine__findnew_thread(&session->machines.host, 0, 0);
1511         if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1512                 pr_err("problem inserting idle task.\n");
1513                 err = -1;
1514         }
1515
1516         if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1517                 pr_err("problem setting namespaces for idle task.\n");
1518                 err = -1;
1519         }
1520
1521         /* machine__findnew_thread() got the thread, so put it */
1522         thread__put(thread);
1523         return err;
1524 }
1525
1526 static void
1527 perf_session__warn_order(const struct perf_session *session)
1528 {
1529         const struct ordered_events *oe = &session->ordered_events;
1530         struct perf_evsel *evsel;
1531         bool should_warn = true;
1532
1533         evlist__for_each_entry(session->evlist, evsel) {
1534                 if (evsel->attr.write_backward)
1535                         should_warn = false;
1536         }
1537
1538         if (!should_warn)
1539                 return;
1540         if (oe->nr_unordered_events != 0)
1541                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1542 }
1543
1544 static void perf_session__warn_about_errors(const struct perf_session *session)
1545 {
1546         const struct events_stats *stats = &session->evlist->stats;
1547
1548         if (session->tool->lost == perf_event__process_lost &&
1549             stats->nr_events[PERF_RECORD_LOST] != 0) {
1550                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1551                             "Check for I/O or CPU overload!\n\n",
1552                             stats->nr_events[0],
1553                             stats->nr_events[PERF_RECORD_LOST]);
1554         }
1555
1556         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1557                 double drop_rate;
1558
1559                 drop_rate = (double)stats->total_lost_samples /
1560                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1561                 if (drop_rate > 0.05) {
1562                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% of them!\n\n",
1563                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1564                                     drop_rate * 100.0);
1565                 }
1566         }
1567
1568         if (session->tool->aux == perf_event__process_aux &&
1569             stats->total_aux_lost != 0) {
1570                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1571                             stats->total_aux_lost,
1572                             stats->nr_events[PERF_RECORD_AUX]);
1573         }
1574
1575         if (session->tool->aux == perf_event__process_aux &&
1576             stats->total_aux_partial != 0) {
1577                 bool vmm_exclusive = false;
1578
1579                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1580                                        &vmm_exclusive);
1581
1582                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1583                             "Are you running a KVM guest in the background?%s\n\n",
1584                             stats->total_aux_partial,
1585                             stats->nr_events[PERF_RECORD_AUX],
1586                             vmm_exclusive ?
1587                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1588                             "will reduce the gaps to only the guest's timeslices." :
1589                             "");
1590         }
1591
1592         if (stats->nr_unknown_events != 0) {
1593                 ui__warning("Found %u unknown events!\n\n"
1594                             "Is this an older tool processing a perf.data "
1595                             "file generated by a more recent tool?\n\n"
1596                             "If that is not the case, consider "
1597                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1598                             stats->nr_unknown_events);
1599         }
1600
1601         if (stats->nr_unknown_id != 0) {
1602                 ui__warning("%u samples with id not present in the header\n",
1603                             stats->nr_unknown_id);
1604         }
1605
1606         if (stats->nr_invalid_chains != 0) {
1607                 ui__warning("Found invalid callchains!\n\n"
1608                             "%u out of %u events were discarded for this reason.\n\n"
1609                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1610                             stats->nr_invalid_chains,
1611                             stats->nr_events[PERF_RECORD_SAMPLE]);
1612         }
1613
1614         if (stats->nr_unprocessable_samples != 0) {
1615                 ui__warning("%u unprocessable samples recorded.\n"
1616                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1617                             stats->nr_unprocessable_samples);
1618         }
1619
1620         perf_session__warn_order(session);
1621
1622         events_stats__auxtrace_error_warn(stats);
1623
1624         if (stats->nr_proc_map_timeout != 0) {
1625                 ui__warning("%d map information files for pre-existing threads were\n"
1626                             "not processed; if there are samples for those addresses,\n"
1627                             "they will not be resolved. You may find out which threads\n"
1628                             "these are by running with -v and redirecting the output\n"
1629                             "to a file.\n"
1630                             "Is the time limit for processing proc maps too short?\n"
1631                             "Increase it with --proc-map-timeout\n",
1632                             stats->nr_proc_map_timeout);
1633         }
1634 }
1635
1636 static int perf_session__flush_thread_stack(struct thread *thread,
1637                                             void *p __maybe_unused)
1638 {
1639         return thread_stack__flush(thread);
1640 }
1641
1642 static int perf_session__flush_thread_stacks(struct perf_session *session)
1643 {
1644         return machines__for_each_thread(&session->machines,
1645                                          perf_session__flush_thread_stack,
1646                                          NULL);
1647 }
1648
1649 volatile int session_done;
1650
1651 static int __perf_session__process_pipe_events(struct perf_session *session)
1652 {
1653         struct ordered_events *oe = &session->ordered_events;
1654         struct perf_tool *tool = session->tool;
1655         int fd = perf_data_file__fd(session->file);
1656         union perf_event *event;
1657         uint32_t size, cur_size = 0;
1658         void *buf = NULL;
1659         s64 skip = 0;
1660         u64 head;
1661         ssize_t err;
1662         void *p;
1663
1664         perf_tool__fill_defaults(tool);
1665
1666         head = 0;
1667         cur_size = sizeof(union perf_event);
1668
1669         buf = malloc(cur_size);
1670         if (!buf)
1671                 return -errno;
1672         ordered_events__set_copy_on_queue(oe, true);
1673 more:
1674         event = buf;
1675         err = readn(fd, event, sizeof(struct perf_event_header));
1676         if (err <= 0) {
1677                 if (err == 0)
1678                         goto done;
1679
1680                 pr_err("failed to read event header\n");
1681                 goto out_err;
1682         }
1683
1684         if (session->header.needs_swap)
1685                 perf_event_header__bswap(&event->header);
1686
1687         size = event->header.size;
1688         if (size < sizeof(struct perf_event_header)) {
1689                 pr_err("bad event header size\n");
1690                 goto out_err;
1691         }
1692
1693         if (size > cur_size) {
1694                 void *new = realloc(buf, size);
1695                 if (!new) {
1696                         pr_err("failed to allocate memory to read event\n");
1697                         goto out_err;
1698                 }
1699                 buf = new;
1700                 cur_size = size;
1701                 event = buf;
1702         }
1703         p = event;
1704         p += sizeof(struct perf_event_header);
1705
1706         if (size > sizeof(struct perf_event_header)) {
1707                 err = readn(fd, p, size - sizeof(struct perf_event_header));
1708                 if (err <= 0) {
1709                         if (err == 0) {
1710                                 pr_err("unexpected end of event stream\n");
1711                                 goto done;
1712                         }
1713
1714                         pr_err("failed to read event data\n");
1715                         goto out_err;
1716                 }
1717         }
1718
1719         if ((skip = perf_session__process_event(session, event, head)) < 0) {
1720                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1721                        head, event->header.size, event->header.type);
1722                 err = -EINVAL;
1723                 goto out_err;
1724         }
1725
1726         head += size;
1727
1728         if (skip > 0)
1729                 head += skip;
1730
1731         if (!session_done())
1732                 goto more;
1733 done:
1734         /* do the final flush for ordered samples */
1735         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1736         if (err)
1737                 goto out_err;
1738         err = auxtrace__flush_events(session, tool);
1739         if (err)
1740                 goto out_err;
1741         err = perf_session__flush_thread_stacks(session);
1742 out_err:
1743         free(buf);
1744         perf_session__warn_about_errors(session);
1745         ordered_events__free(&session->ordered_events);
1746         auxtrace__free_events(session);
1747         return err;
1748 }
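/*
 * The stream framing handled above is, informally:
 *
 *	struct perf_event_header hdr;		read sizeof(hdr) bytes,
 *						byte-swap if needs_swap
 *	char payload[hdr.size - sizeof(hdr)];	then read the remainder
 *						into the same buffer
 *
 * so the read buffer only ever needs to grow to the largest hdr.size
 * seen so far.
 */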
1749
1750 static union perf_event *
1751 fetch_mmaped_event(struct perf_session *session,
1752                    u64 head, size_t mmap_size, char *buf)
1753 {
1754         union perf_event *event;
1755
1756         /*
1757          * Ensure we have enough space remaining to read
1758          * the size of the event in the headers.
1759          */
1760         if (head + sizeof(event->header) > mmap_size)
1761                 return NULL;
1762
1763         event = (union perf_event *)(buf + head);
1764
1765         if (session->header.needs_swap)
1766                 perf_event_header__bswap(&event->header);
1767
1768         if (head + event->header.size > mmap_size) {
1769                 /* We're not fetching the event so swap back again */
1770                 if (session->header.needs_swap)
1771                         perf_event_header__bswap(&event->header);
1772                 return NULL;
1773         }
1774
1775         return event;
1776 }
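/*
 * Contract with the caller in __perf_session__process_events(): a NULL
 * return means the header or the full event crosses the end of the
 * current mmap window, so the caller must remap at a page boundary
 * covering 'head' and retry.
 */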
1777
1778 /*
1779  * On 64-bit we can mmap the data file in one go; no need for tiny mmap
1780  * slices. On 32-bit we use 32MB slices.
1781  */
1782 #if BITS_PER_LONG == 64
1783 #define MMAP_SIZE ULLONG_MAX
1784 #define NUM_MMAPS 1
1785 #else
1786 #define MMAP_SIZE (32 * 1024 * 1024ULL)
1787 #define NUM_MMAPS 128
1788 #endif
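/*
 * The mmap windows are kept in a small ring: queued (ordered) events
 * may still point into older windows, so a slot is only munmap'ed when
 * its index is about to be reused. NUM_MMAPS must stay a power of two
 * because map_idx wraps with '& (ARRAY_SIZE(mmaps) - 1)'.
 */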
1789
1790 static int __perf_session__process_events(struct perf_session *session,
1791                                           u64 data_offset, u64 data_size,
1792                                           u64 file_size)
1793 {
1794         struct ordered_events *oe = &session->ordered_events;
1795         struct perf_tool *tool = session->tool;
1796         int fd = perf_data_file__fd(session->file);
1797         u64 head, page_offset, file_offset, file_pos, size;
1798         int err, mmap_prot, mmap_flags, map_idx = 0;
1799         size_t  mmap_size;
1800         char *buf, *mmaps[NUM_MMAPS];
1801         union perf_event *event;
1802         struct ui_progress prog;
1803         s64 skip;
1804
1805         perf_tool__fill_defaults(tool);
1806
1807         page_offset = page_size * (data_offset / page_size);
1808         file_offset = page_offset;
1809         head = data_offset - page_offset;
1810
1811         if (data_size == 0)
1812                 goto out;
1813
1814         if (data_offset + data_size < file_size)
1815                 file_size = data_offset + data_size;
1816
1817         ui_progress__init(&prog, file_size, "Processing events...");
1818
1819         mmap_size = MMAP_SIZE;
1820         if (mmap_size > file_size) {
1821                 mmap_size = file_size;
1822                 session->one_mmap = true;
1823         }
1824
1825         memset(mmaps, 0, sizeof(mmaps));
1826
1827         mmap_prot  = PROT_READ;
1828         mmap_flags = MAP_SHARED;
1829
1830         if (session->header.needs_swap) {
1831                 mmap_prot  |= PROT_WRITE;
1832                 mmap_flags = MAP_PRIVATE;
1833         }
1834 remap:
1835         buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
1836                    file_offset);
1837         if (buf == MAP_FAILED) {
1838                 pr_err("failed to mmap file\n");
1839                 err = -errno;
1840                 goto out_err;
1841         }
1842         mmaps[map_idx] = buf;
1843         map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
1844         file_pos = file_offset + head;
1845         if (session->one_mmap) {
1846                 session->one_mmap_addr = buf;
1847                 session->one_mmap_offset = file_offset;
1848         }
1849
1850 more:
1851         event = fetch_mmaped_event(session, head, mmap_size, buf);
1852         if (!event) {
1853                 if (mmaps[map_idx]) {
1854                         munmap(mmaps[map_idx], mmap_size);
1855                         mmaps[map_idx] = NULL;
1856                 }
1857
1858                 page_offset = page_size * (head / page_size);
1859                 file_offset += page_offset;
1860                 head -= page_offset;
1861                 goto remap;
1862         }
1863
1864         size = event->header.size;
1865
1866         if (size < sizeof(struct perf_event_header) ||
1867             (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1868                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1869                        file_offset + head, event->header.size,
1870                        event->header.type);
1871                 err = -EINVAL;
1872                 goto out_err;
1873         }
1874
1875         if (skip)
1876                 size += skip;
1877
1878         head += size;
1879         file_pos += size;
1880
1881         ui_progress__update(&prog, size);
1882
1883         if (session_done())
1884                 goto out;
1885
1886         if (file_pos < file_size)
1887                 goto more;
1888
1889 out:
1890         /* do the final flush for ordered samples */
1891         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1892         if (err)
1893                 goto out_err;
1894         err = auxtrace__flush_events(session, tool);
1895         if (err)
1896                 goto out_err;
1897         err = perf_session__flush_thread_stacks(session);
1898 out_err:
1899         ui_progress__finish();
1900         perf_session__warn_about_errors(session);
1901         /*
1902          * We may be switching perf.data output; make ordered_events
1903          * reusable.
1904          */
1905         ordered_events__reinit(&session->ordered_events);
1906         auxtrace__free_events(session);
1907         session->one_mmap = false;
1908         return err;
1909 }
1910
1911 int perf_session__process_events(struct perf_session *session)
1912 {
1913         u64 size = perf_data_file__size(session->file);
1914         int err;
1915
1916         if (perf_session__register_idle_thread(session) < 0)
1917                 return -ENOMEM;
1918
1919         if (!perf_data_file__is_pipe(session->file))
1920                 err = __perf_session__process_events(session,
1921                                                      session->header.data_offset,
1922                                                      session->header.data_size, size);
1923         else
1924                 err = __perf_session__process_pipe_events(session);
1925
1926         return err;
1927 }
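/*
 * Hedged end-to-end sketch (error handling trimmed): perf_session__new()
 * and perf_session__delete() are defined elsewhere in this file; 'tool'
 * is a caller-provided struct perf_tool as sketched earlier.
 *
 *	struct perf_data_file file = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session) {
 *		perf_session__process_events(session);
 *		perf_session__delete(session);
 *	}
 */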
1928
1929 bool perf_session__has_traces(struct perf_session *session, const char *msg)
1930 {
1931         struct perf_evsel *evsel;
1932
1933         evlist__for_each_entry(session->evlist, evsel) {
1934                 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1935                         return true;
1936         }
1937
1938         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1939         return false;
1940 }
1941
1942 int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1943                                      const char *symbol_name, u64 addr)
1944 {
1945         char *bracket;
1946         int i;
1947         struct ref_reloc_sym *ref;
1948
1949         ref = zalloc(sizeof(struct ref_reloc_sym));
1950         if (ref == NULL)
1951                 return -ENOMEM;
1952
1953         ref->name = strdup(symbol_name);
1954         if (ref->name == NULL) {
1955                 free(ref);
1956                 return -ENOMEM;
1957         }
1958
1959         bracket = strchr(ref->name, ']');
1960         if (bracket)
1961                 *bracket = '\0';
1962
1963         ref->addr = addr;
1964
1965         for (i = 0; i < MAP__NR_TYPES; ++i) {
1966                 struct kmap *kmap = map__kmap(maps[i]);
1967
1968                 if (!kmap)
1969                         continue;
1970                 kmap->ref_reloc_sym = ref;
1971         }
1972
1973         return 0;
1974 }
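/*
 * The bracket handling above is defensive: if symbol_name arrives in a
 * "name]..." form (say, a hypothetical "_text]" spec), the copy is
 * truncated at the first ']' so that only "_text" is recorded as the
 * reference relocation symbol shared by every map type.
 */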
1975
1976 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
1977 {
1978         return machines__fprintf_dsos(&session->machines, fp);
1979 }
1980
1981 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
1982                                           bool (skip)(struct dso *dso, int parm), int parm)
1983 {
1984         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
1985 }
1986
1987 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1988 {
1989         size_t ret;
1990         const char *msg = "";
1991
1992         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
1993                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
1994
1995         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
1996
1997         ret += events_stats__fprintf(&session->evlist->stats, fp);
1998         return ret;
1999 }
2000
2001 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2002 {
2003         /*
2004          * FIXME: Here we have to actually print all the machines in this
2005          * session, not just the host...
2006          */
2007         return machine__fprintf(&session->machines.host, fp);
2008 }
2009
2010 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
2011                                               unsigned int type)
2012 {
2013         struct perf_evsel *pos;
2014
2015         evlist__for_each_entry(session->evlist, pos) {
2016                 if (pos->attr.type == type)
2017                         return pos;
2018         }
2019         return NULL;
2020 }
2021
2022 int perf_session__cpu_bitmap(struct perf_session *session,
2023                              const char *cpu_list, unsigned long *cpu_bitmap)
2024 {
2025         int i, err = -1;
2026         struct cpu_map *map;
2027
2028         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2029                 struct perf_evsel *evsel;
2030
2031                 evsel = perf_session__find_first_evtype(session, i);
2032                 if (!evsel)
2033                         continue;
2034
2035                 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
2036                         pr_err("File does not contain CPU events. "
2037                                "Remove -C option to proceed.\n");
2038                         return -1;
2039                 }
2040         }
2041
2042         map = cpu_map__new(cpu_list);
2043         if (map == NULL) {
2044                 pr_err("Invalid cpu_list\n");
2045                 return -1;
2046         }
2047
2048         for (i = 0; i < map->nr; i++) {
2049                 int cpu = map->map[i];
2050
2051                 if (cpu >= MAX_NR_CPUS) {
2052                         pr_err("Requested CPU %d too large. "
2053                                "Consider raising MAX_NR_CPUS\n", cpu);
2054                         goto out_delete_map;
2055                 }
2056
2057                 set_bit(cpu, cpu_bitmap);
2058         }
2059
2060         err = 0;
2061
2062 out_delete_map:
2063         cpu_map__put(map);
2064         return err;
2065 }
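/*
 * Hedged usage sketch (hypothetical caller): resolve a user-supplied
 * CPU list like "0-3,8" into a bitmap for later sample filtering:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS) = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3,8", cpu_bitmap) < 0)
 *		return -1;
 *
 * afterwards, test_bit(sample->cpu, cpu_bitmap) tells whether a
 * sample's CPU was requested.
 */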
2066
2067 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2068                                 bool full)
2069 {
2070         if (session == NULL || fp == NULL)
2071                 return;
2072
2073         fprintf(fp, "# ========\n");
2074         perf_header__fprintf_info(session, fp, full);
2075         fprintf(fp, "# ========\n#\n");
2076 }
2077
2078
2079 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2080                                              const struct perf_evsel_str_handler *assocs,
2081                                              size_t nr_assocs)
2082 {
2083         struct perf_evsel *evsel;
2084         size_t i;
2085         int err;
2086
2087         for (i = 0; i < nr_assocs; i++) {
2088                 /*
2089                  * If the event this handler is for is not in the
2090                  * session, just ignore it.
2091                  */
2092                 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2093                 if (evsel == NULL)
2094                         continue;
2095
2096                 err = -EEXIST;
2097                 if (evsel->handler != NULL)
2098                         goto out;
2099                 evsel->handler = assocs[i].handler;
2100         }
2101
2102         err = 0;
2103 out:
2104         return err;
2105 }
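/*
 * Typical usage pattern, as a hedged sketch (the handler names are
 * hypothetical; callers normally go through the
 * perf_session__set_tracepoints_handlers() wrapper that supplies
 * ARRAY_SIZE() for them):
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *		{ "sched:sched_wakeup", process_sched_wakeup },
 *	};
 *
 *	if (__perf_session__set_tracepoints_handlers(session, handlers,
 *						     ARRAY_SIZE(handlers)))
 *		goto out_error;
 */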
2106
2107 int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
2108                                  union perf_event *event,
2109                                  struct perf_session *session)
2110 {
2111         struct perf_evlist *evlist = session->evlist;
2112         struct id_index_event *ie = &event->id_index;
2113         size_t i, nr, max_nr;
2114
2115         max_nr = (ie->header.size - sizeof(struct id_index_event)) /
2116                  sizeof(struct id_index_entry);
2117         nr = ie->nr;
2118         if (nr > max_nr)
2119                 return -EINVAL;
2120
2121         if (dump_trace)
2122                 fprintf(stdout, " nr: %zu\n", nr);
2123
2124         for (i = 0; i < nr; i++) {
2125                 struct id_index_entry *e = &ie->entries[i];
2126                 struct perf_sample_id *sid;
2127
2128                 if (dump_trace) {
2129                         fprintf(stdout, " ... id: %"PRIu64, e->id);
2130                         fprintf(stdout, "  idx: %"PRIu64, e->idx);
2131                         fprintf(stdout, "  cpu: %"PRId64, e->cpu);
2132                         fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
2133                 }
2134
2135                 sid = perf_evlist__id2sid(evlist, e->id);
2136                 if (!sid)
2137                         return -ENOENT;
2138                 sid->idx = e->idx;
2139                 sid->cpu = e->cpu;
2140                 sid->tid = e->tid;
2141         }
2142         return 0;
2143 }
2144
2145 int perf_event__synthesize_id_index(struct perf_tool *tool,
2146                                     perf_event__handler_t process,
2147                                     struct perf_evlist *evlist,
2148                                     struct machine *machine)
2149 {
2150         union perf_event *ev;
2151         struct perf_evsel *evsel;
2152         size_t nr = 0, i = 0, sz, max_nr, n;
2153         int err;
2154
2155         pr_debug2("Synthesizing id index\n");
2156
2157         max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
2158                  sizeof(struct id_index_entry);
2159
2160         evlist__for_each_entry(evlist, evsel)
2161                 nr += evsel->ids;
2162
2163         n = nr > max_nr ? max_nr : nr;
2164         sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
2165         ev = zalloc(sz);
2166         if (!ev)
2167                 return -ENOMEM;
2168
2169         ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2170         ev->id_index.header.size = sz;
2171         ev->id_index.nr = n;
2172
2173         evlist__for_each_entry(evlist, evsel) {
2174                 u32 j;
2175
2176                 for (j = 0; j < evsel->ids; j++) {
2177                         struct id_index_entry *e;
2178                         struct perf_sample_id *sid;
2179
2180                         if (i >= n) {
2181                                 err = process(tool, ev, NULL, machine);
2182                                 if (err)
2183                                         goto out_err;
2184                                 nr -= n;
2185                                 i = 0;
2186                         }
2187
2188                         e = &ev->id_index.entries[i++];
2189
2190                         e->id = evsel->id[j];
2191
2192                         sid = perf_evlist__id2sid(evlist, e->id);
2193                         if (!sid) {
2194                                 free(ev);
2195                                 return -ENOENT;
2196                         }
2197
2198                         e->idx = sid->idx;
2199                         e->cpu = sid->cpu;
2200                         e->tid = sid->tid;
2201                 }
2202         }
2203
2204         sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
2205         ev->id_index.header.size = sz;
2206         ev->id_index.nr = nr;
2207
2208         err = process(tool, ev, NULL, machine);
2209 out_err:
2210         free(ev);
2211
2212         return err;
2213 }
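
/*
 * Note on the chunking above: the id_index header.size field is a u16,
 * so at most max_nr entries fit in a single record. When the evlist
 * carries more ids than that, a full record is flushed mid-loop via
 * process() and the buffer is reused; the final record is shrunk to
 * hold just the remainder.
 */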