#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H

#include <limits.h>
#include <stdio.h>
#include <linux/kernel.h>

#include "../perf.h"
#include "build-id.h"
#include "perf_regs.h"

struct mmap_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        char filename[PATH_MAX];
};

struct mmap2_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 start;
        u64 len;
        u64 pgoff;
        u32 maj;
        u32 min;
        u64 ino;
        u64 ino_generation;
        u32 prot;
        u32 flags;
        char filename[PATH_MAX];
};

struct comm_event {
        struct perf_event_header header;
        u32 pid, tid;
        char comm[16];
};

struct namespaces_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 nr_namespaces;
        struct perf_ns_link_info link_info[];
};

struct fork_event {
        struct perf_event_header header;
        u32 pid, ppid;
        u32 tid, ptid;
        u64 time;
};

struct lost_event {
        struct perf_event_header header;
        u64 id;
        u64 lost;
};

struct lost_samples_event {
        struct perf_event_header header;
        u64 lost;
};

/*
 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
 */
struct read_event {
        struct perf_event_header header;
        u32 pid, tid;
        u64 value;
        u64 time_enabled;
        u64 time_running;
        u64 id;
};
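
/*
 * For reference, a note on the read_format bits named above (their full
 * kernel names are PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING
 * and PERF_FORMAT_ID): value holds the counter count, time_enabled and
 * time_running the enabled/running times, and id the counter id.
 */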

struct throttle_event {
        struct perf_event_header header;
        u64 time;
        u64 id;
        u64 stream_id;
};

#define PERF_SAMPLE_MASK                                \
        (PERF_SAMPLE_IP | PERF_SAMPLE_TID |             \
         PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR |          \
         PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID |       \
         PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD |         \
         PERF_SAMPLE_IDENTIFIER)

/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)

struct sample_event {
        struct perf_event_header        header;
        u64 array[];
};

struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;

        /* Cached values/mask filled by first register access. */
        u64 cache_regs[PERF_REGS_MAX];
        u64 cache_mask;
};

struct stack_dump {
        u16 offset;
        u64 size;
        char *data;
};

struct sample_read_value {
        u64 value;
        u64 id;
};

struct sample_read {
        u64 time_enabled;
        u64 time_running;
        union {
                struct {
                        u64 nr;
                        struct sample_read_value *values;
                } group;
                struct sample_read_value one;
        };
};
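
/*
 * Illustrative sketch, assuming typical use: which member of the union in
 * struct sample_read is valid depends on the event's read_format.  With
 * PERF_FORMAT_GROUP the values arrive in .group, otherwise in .one, e.g.
 * (use() is a hypothetical consumer):
 *
 *      if (read_format & PERF_FORMAT_GROUP)
 *              for (u64 i = 0; i < sample->read.group.nr; i++)
 *                      use(sample->read.group.values[i].id,
 *                          sample->read.group.values[i].value);
 *      else
 *              use(sample->read.one.id, sample->read.one.value);
 */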

struct ip_callchain {
        u64 nr;
        u64 ips[0];
};

struct branch_flags {
        u64 mispred:1;
        u64 predicted:1;
        u64 in_tx:1;
        u64 abort:1;
        u64 cycles:16;
        u64 reserved:44;
};

struct branch_entry {
        u64                     from;
        u64                     to;
        struct branch_flags     flags;
};

struct branch_stack {
        u64                     nr;
        struct branch_entry     entries[0];
};

enum {
        PERF_IP_FLAG_BRANCH             = 1ULL << 0,
        PERF_IP_FLAG_CALL               = 1ULL << 1,
        PERF_IP_FLAG_RETURN             = 1ULL << 2,
        PERF_IP_FLAG_CONDITIONAL        = 1ULL << 3,
        PERF_IP_FLAG_SYSCALLRET         = 1ULL << 4,
        PERF_IP_FLAG_ASYNC              = 1ULL << 5,
        PERF_IP_FLAG_INTERRUPT          = 1ULL << 6,
        PERF_IP_FLAG_TX_ABORT           = 1ULL << 7,
        PERF_IP_FLAG_TRACE_BEGIN        = 1ULL << 8,
        PERF_IP_FLAG_TRACE_END          = 1ULL << 9,
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
};

#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
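
/*
 * Each character in PERF_IP_FLAG_CHARS corresponds, in order, to one of the
 * PERF_IP_FLAG_* bits above: b=BRANCH, c=CALL, r=RETURN, o=CONDITIONAL,
 * s=SYSCALLRET, y=ASYNC, i=INTERRUPT, A=TX_ABORT, B=TRACE_BEGIN, E=TRACE_END,
 * x=IN_TX.
 */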

#define PERF_BRANCH_MASK                (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
        PERF_IP_FLAG_RETURN             |\
        PERF_IP_FLAG_CONDITIONAL        |\
        PERF_IP_FLAG_SYSCALLRET         |\
        PERF_IP_FLAG_ASYNC              |\
        PERF_IP_FLAG_INTERRUPT          |\
        PERF_IP_FLAG_TX_ABORT           |\
        PERF_IP_FLAG_TRACE_BEGIN        |\
        PERF_IP_FLAG_TRACE_END)

#define MAX_INSN 16

struct perf_sample {
        u64 ip;
        u32 pid, tid;
        u64 time;
        u64 addr;
        u64 id;
        u64 stream_id;
        u64 period;
        u64 weight;
        u64 transaction;
        u32 cpu;
        u32 raw_size;
        u64 data_src;
        u32 flags;
        u16 insn_len;
        u8  cpumode;
        char insn[MAX_INSN];
        void *raw_data;
        struct ip_callchain *callchain;
        struct branch_stack *branch_stack;
        struct regs_dump  user_regs;
        struct regs_dump  intr_regs;
        struct stack_dump user_stack;
        struct sample_read read;
};

#define PERF_MEM_DATA_SRC_NONE \
        (PERF_MEM_S(OP, NA) |\
         PERF_MEM_S(LVL, NA) |\
         PERF_MEM_S(SNOOP, NA) |\
         PERF_MEM_S(LOCK, NA) |\
         PERF_MEM_S(TLB, NA))

struct build_id_event {
        struct perf_event_header header;
        pid_t                    pid;
        u8                       build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
        char                     filename[];
};

enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_USER_TYPE_START             = 64,
        PERF_RECORD_HEADER_ATTR                 = 64,
        PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
        PERF_RECORD_HEADER_TRACING_DATA         = 66,
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
        PERF_RECORD_AUXTRACE_INFO               = 70,
        PERF_RECORD_AUXTRACE                    = 71,
        PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_THREAD_MAP                  = 73,
        PERF_RECORD_CPU_MAP                     = 74,
        PERF_RECORD_STAT_CONFIG                 = 75,
        PERF_RECORD_STAT                        = 76,
        PERF_RECORD_STAT_ROUND                  = 77,
        PERF_RECORD_EVENT_UPDATE                = 78,
        PERF_RECORD_TIME_CONV                   = 79,
        PERF_RECORD_HEADER_MAX
};

enum auxtrace_error_type {
        PERF_AUXTRACE_ERROR_ITRACE  = 1,
        PERF_AUXTRACE_ERROR_MAX
};

/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH         (INT_MAX + 1U)

/* Attribute config for custom synthesized events */
enum perf_synth_id {
        PERF_SYNTH_INTEL_PTWRITE,
        PERF_SYNTH_INTEL_MWAIT,
        PERF_SYNTH_INTEL_PWRE,
        PERF_SYNTH_INTEL_EXSTOP,
        PERF_SYNTH_INTEL_PWRX,
        PERF_SYNTH_INTEL_CBR,
};

/*
 * Raw data formats for synthesized events. Note that 4 bytes of padding are
 * present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
 * 8-byte aligned. That means we must dereference raw_data with an offset of 4.
 * Refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
 * the structure sizes are 4 bytes bigger than the raw_size; refer to
 * perf_synth__raw_size().
 */

struct perf_synth_intel_ptwrite {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
        u64     payload;
};

struct perf_synth_intel_mwait {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     hints           :  8,
                                reserved1       : 24,
                                extensions      :  2,
                                reserved2       : 30;
                };
                u64     payload;
        };
};

struct perf_synth_intel_pwre {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     reserved1       :  7,
                                hw              :  1,
                                subcstate       :  4,
                                cstate          :  4,
                                reserved2       : 48;
                };
                u64     payload;
        };
};

struct perf_synth_intel_exstop {
        u32 padding;
        union {
                struct {
                        u32     ip              :  1,
                                reserved        : 31;
                };
                u32     flags;
        };
};

struct perf_synth_intel_pwrx {
        u32 padding;
        u32 reserved;
        union {
                struct {
                        u64     deepest_cstate  :  4,
                                last_cstate     :  4,
                                wake_reason     :  4,
                                reserved1       : 52;
                };
                u64     payload;
        };
};

struct perf_synth_intel_cbr {
        u32 padding;
        union {
                struct {
                        u32     cbr             :  8,
                                reserved1       :  8,
                                max_nonturbo    :  8,
                                reserved2       :  8;
                };
                u32     flags;
        };
        u32 freq;
        u32 reserved3;
};

/*
 * raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
 * 8-byte alignment.
 */
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
        return sample->raw_data - 4;
}

static inline void *perf_synth__raw_data(void *p)
{
        return p + 4;
}

#define perf_synth__raw_size(d) (sizeof(d) - 4)

#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
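
/*
 * Illustrative sketch, assuming typical use: a consumer of a synthesized
 * sample checks the raw size and then maps the raw data onto one of the
 * structs above, e.g. for an Intel PT ptwrite event:
 *
 *      struct perf_synth_intel_ptwrite *ptw;
 *
 *      if (perf_sample__bad_synth_size(sample, *ptw))
 *              return;
 *      ptw = perf_sample__synth_ptr(sample);
 *      // ptw->ip and ptw->payload are now usable
 */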

/*
 * The kernel collects the number of events it couldn't send in a stretch and
 * when possible sends this number in a PERF_RECORD_LOST event. The number of
 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
 * the sum of all struct lost_event.lost fields reported.
 *
 * The kernel discards mixed up samples and sends the number in a
 * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
 * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
 * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
 * all struct lost_samples_event.lost fields reported.
 *
 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_EVENT_SAMPLE] by a fixed frequency cannot give
 * the total number of low-level events; it is necessary to sum all struct
 * sample_event.period fields and stash the result in total_period.
 */
struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
        u64 total_lost_samples;
        u64 total_aux_lost;
        u64 total_aux_partial;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
        u32 nr_lost_warned;
        u32 nr_unknown_events;
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
        u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
        u32 nr_proc_map_timeout;
};
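
/*
 * Illustrative sketch, assuming typical use: a PERF_RECORD_LOST handler would
 * update both kinds of counters described in the comment above:
 *
 *      stats->nr_events[PERF_RECORD_LOST]++;
 *      stats->total_lost += event->lost.lost;
 */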

enum {
        PERF_CPU_MAP__CPUS = 0,
        PERF_CPU_MAP__MASK = 1,
};

struct cpu_map_entries {
        u16     nr;
        u16     cpu[];
};

struct cpu_map_mask {
        u16     nr;
        u16     long_size;
        unsigned long mask[];
};

struct cpu_map_data {
        u16     type;
        char    data[];
};
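
/*
 * Illustrative sketch, assuming typical use: cpu_map_data.data is interpreted
 * according to .type:
 *
 *      if (data->type == PERF_CPU_MAP__CPUS) {
 *              struct cpu_map_entries *cpus = (void *)data->data;
 *              // cpus->cpu[] holds cpus->nr u16 cpu numbers
 *      } else {                                // PERF_CPU_MAP__MASK
 *              struct cpu_map_mask *mask = (void *)data->data;
 *              // mask->mask[] holds mask->nr longs of mask->long_size bytes
 *      }
 */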

struct cpu_map_event {
        struct perf_event_header        header;
        struct cpu_map_data             data;
};

struct attr_event {
        struct perf_event_header header;
        struct perf_event_attr attr;
        u64 id[];
};

enum {
        PERF_EVENT_UPDATE__UNIT  = 0,
        PERF_EVENT_UPDATE__SCALE = 1,
        PERF_EVENT_UPDATE__NAME  = 2,
        PERF_EVENT_UPDATE__CPUS  = 3,
};

struct event_update_event_cpus {
        struct cpu_map_data cpus;
};

struct event_update_event_scale {
        double scale;
};

struct event_update_event {
        struct perf_event_header header;
        u64 type;
        u64 id;

        char data[];
};

#define MAX_EVENT_NAME 64

struct perf_trace_event_type {
        u64     event_id;
        char    name[MAX_EVENT_NAME];
};

struct event_type_event {
        struct perf_event_header header;
        struct perf_trace_event_type event_type;
};

struct tracing_data_event {
        struct perf_event_header header;
        u32 size;
};

struct id_index_entry {
        u64 id;
        u64 idx;
        u64 cpu;
        u64 tid;
};

struct id_index_event {
        struct perf_event_header header;
        u64 nr;
        struct id_index_entry entries[0];
};

struct auxtrace_info_event {
        struct perf_event_header header;
        u32 type;
        u32 reserved__; /* For alignment */
        u64 priv[];
};

struct auxtrace_event {
        struct perf_event_header header;
        u64 size;
        u64 offset;
        u64 reference;
        u32 idx;
        u32 tid;
        u32 cpu;
        u32 reserved__; /* For alignment */
};

#define MAX_AUXTRACE_ERROR_MSG 64

struct auxtrace_error_event {
        struct perf_event_header header;
        u32 type;
        u32 code;
        u32 cpu;
        u32 pid;
        u32 tid;
        u32 reserved__; /* For alignment */
        u64 ip;
        char msg[MAX_AUXTRACE_ERROR_MSG];
};

struct aux_event {
        struct perf_event_header header;
        u64     aux_offset;
        u64     aux_size;
        u64     flags;
};

struct itrace_start_event {
        struct perf_event_header header;
        u32 pid, tid;
};

struct context_switch_event {
        struct perf_event_header header;
        u32 next_prev_pid;
        u32 next_prev_tid;
};

struct thread_map_event_entry {
        u64     pid;
        char    comm[16];
};

struct thread_map_event {
        struct perf_event_header        header;
        u64                             nr;
        struct thread_map_event_entry   entries[];
};

enum {
        PERF_STAT_CONFIG_TERM__AGGR_MODE        = 0,
        PERF_STAT_CONFIG_TERM__INTERVAL         = 1,
        PERF_STAT_CONFIG_TERM__SCALE            = 2,
        PERF_STAT_CONFIG_TERM__MAX              = 3,
};

struct stat_config_event_entry {
        u64     tag;
        u64     val;
};

struct stat_config_event {
        struct perf_event_header        header;
        u64                             nr;
        struct stat_config_event_entry  data[];
};

struct stat_event {
        struct perf_event_header        header;

        u64     id;
        u32     cpu;
        u32     thread;

        union {
                struct {
                        u64 val;
                        u64 ena;
                        u64 run;
                };
                u64 values[3];
        };
};

enum {
        PERF_STAT_ROUND_TYPE__INTERVAL  = 0,
        PERF_STAT_ROUND_TYPE__FINAL     = 1,
};

struct stat_round_event {
        struct perf_event_header        header;
        u64                             type;
        u64                             time;
};

struct time_conv_event {
        struct perf_event_header header;
        u64 time_shift;
        u64 time_mult;
        u64 time_zero;
};

union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
        struct mmap2_event              mmap2;
        struct comm_event               comm;
        struct namespaces_event         namespaces;
        struct fork_event               fork;
        struct lost_event               lost;
        struct lost_samples_event       lost_samples;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
        struct attr_event               attr;
        struct event_update_event       event_update;
        struct event_type_event         event_type;
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
        struct auxtrace_info_event      auxtrace_info;
        struct auxtrace_event           auxtrace;
        struct auxtrace_error_event     auxtrace_error;
        struct aux_event                aux;
        struct itrace_start_event       itrace_start;
        struct context_switch_event     context_switch;
        struct thread_map_event         thread_map;
        struct cpu_map_event            cpu_map;
        struct stat_config_event        stat_config;
        struct stat_event               stat;
        struct stat_round_event         stat_round;
        struct time_conv_event          time_conv;
};

void perf_event__print_totals(void);

struct perf_tool;
struct thread_map;
struct cpu_map;
struct perf_stat_config;
struct perf_counts_values;

typedef int (*perf_event__handler_t)(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
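
/*
 * Illustrative sketch of a perf_event__handler_t callback (the name
 * process_synth_event is hypothetical, used only for the example):
 *
 *      static int process_synth_event(struct perf_tool *tool,
 *                                     union perf_event *event,
 *                                     struct perf_sample *sample,
 *                                     struct machine *machine)
 *      {
 *              // e.g. write the synthesized event to the output file
 *              return 0;       // a negative return reports an error
 *      }
 */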

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine, bool mmap_data,
                                      unsigned int proc_map_timeout);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                       struct thread_map *threads,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct cpu_map *cpus,
                                   perf_event__handler_t process,
                                   struct machine *machine);
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine, bool mmap_data,
                                   unsigned int proc_map_timeout);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine);
void perf_event__read_stat_config(struct perf_stat_config *config,
                                  struct stat_config_event *event);
int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine);
int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 time, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine);

int perf_event__process_comm(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_lost_samples(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_aux(struct perf_tool *tool,
                            union perf_event *event,
                            struct perf_sample *sample,
                            struct machine *machine);
int perf_event__process_itrace_start(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample,
                                     struct machine *machine);
int perf_event__process_switch(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample,
                               struct machine *machine);
int perf_event__process_namespaces(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
                                   struct machine *machine);
int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_mmap2(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample,
                              struct machine *machine);
int perf_event__process_fork(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process_exit(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
int perf_event__process(struct perf_tool *tool,
                        union perf_event *event,
                        struct perf_sample *sample,
                        struct machine *machine);

struct addr_location;

int machine__resolve(struct machine *machine, struct addr_location *al,
                     struct perf_sample *sample);

void addr_location__put(struct addr_location *al);

struct thread;

bool is_bts_event(struct perf_event_attr *attr);
bool sample_addr_correlates_sym(struct perf_event_attr *attr);
void thread__resolve(struct thread *thread, struct addr_location *al,
                     struct perf_sample *sample);

const char *perf_event__name(unsigned int id);

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
                                     u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
                                  u64 read_format,
                                  const struct perf_sample *sample,
                                  bool swapped);

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine);

int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine);

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data,
                                       unsigned int proc_map_timeout);

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);

int kallsyms__get_function_start(const char *kallsyms_filename,
                                 const char *symbol_name, u64 *addr);

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max);
void  cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
                               u16 type, int max);

void event_attr_init(struct perf_event_attr *attr);

int perf_event_paranoid(void);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

#endif /* __PERF_RECORD_H */