#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "util.h"
#include "auxtrace.h"
#include <unistd.h>

struct pollfd;
struct thread_map;
struct cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)

/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
        void             *base;
        int              mask;
        int              fd;
        refcount_t       refcnt;
        u64              prev;
        struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
};

static inline size_t
perf_mmap__mmap_len(struct perf_mmap *map)
{
        return map->mask + 1 + page_size;
}

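/*
 * Illustrative arithmetic (a sketch, not part of the original header):
 * the mapping is one struct perf_event_mmap_page control page followed
 * by the data area, and "mask" is the data size minus one, so for n
 * data pages:
 *
 *	mask = n * page_size - 1
 *	len  = (mask + 1) + page_size = (n + 1) * page_size
 */
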
/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                      V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
        BKW_MMAP_NOTREADY,
        BKW_MMAP_RUNNING,
        BKW_MMAP_DATA_PENDING,
        BKW_MMAP_EMPTY,
};

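/*
 * A sketch (not taken from this file) of how a consumer might drive the
 * transitions above via perf_evlist__toggle_bkw_mmap(), declared below:
 *
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);	// (1) pause
 *	... read events out of the backward ring buffers ...		// (2)
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);		// done reading
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);	// (3) resume
 */
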
struct perf_evlist {
        struct list_head entries;
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        int              nr_entries;
        int              nr_groups;
        int              nr_mmaps;
        bool             overwrite;
        bool             enabled;
        bool             has_user_cpus;
        size_t           mmap_len;
        int              id_pos;
        int              is_pos;
        u64              combined_sample_type;
        enum bkw_mmap_state bkw_mmap_state;
        struct {
                int     cork_fd;
                pid_t   pid;
        } workload;
        struct fdarray   pollfd;
        struct perf_mmap *mmap;
        struct perf_mmap *backward_mmap;
        struct thread_map *threads;
        struct cpu_map    *cpus;
        struct perf_evsel *selected;
        struct events_stats stats;
        struct perf_env *env;
};

struct perf_evsel_str_handler {
        const char *name;
        void       *handler;
};

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);
int perf_evlist__add_default(struct perf_evlist *evlist);
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
        __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))

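/*
 * Example (a sketch; the "default_attrs" array is made up for
 * illustration): adding a fixed set of events described by
 * perf_event_attr templates:
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, default_attrs) < 0)
 *		return -1;
 */
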
int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
                                   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
                                     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
        __perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
        __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)

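/*
 * For instance, perf_evlist__set_sample_bit(evlist, CPU) expands to
 * __perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_CPU), i.e. the
 * PERF_SAMPLE_CPU bit is requested for the evsels on the list.
 */
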
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                           struct perf_evsel *evsel,
                           int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
                                                u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

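/*
 * Sketch: resolving which evsel a decoded sample belongs to, assuming
 * "sample" was filled in by perf_evlist__parse_sample() (declared below):
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample.id);
 */
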
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);

void perf_mmap__read_catchup(struct perf_mmap *md);
void perf_mmap__consume(struct perf_mmap *md, bool overwrite);

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
                                                 int idx);
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
                                                  int idx);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

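/*
 * A minimal consumer loop over mmap'ed ring buffer "idx" (illustrative
 * only, error handling omitted):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... process the event, e.g. via perf_evlist__parse_sample() ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */
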
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);

struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
                         struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct target *target,
                                  const char *argv[], bool pipe_output,
                                  void (*exec_error)(int signo, siginfo_t *info,
                                                     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);

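/*
 * Sketch of a typical ordering when profiling a forked workload: the
 * child is forked early and stays corked on workload.cork_fd until the
 * counters and mmaps are set up, then start_workload() lets it exec.
 * "target", "argv" and "pages" stand in for caller-provided values:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */
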
struct option;

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
                                  const char *str,
                                  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                         bool overwrite, unsigned int auxtrace_pages,
                         bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);

void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
                           struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample);

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list);

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
        return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->base;
        u64 head = ACCESS_ONCE(pc->data_head);
        rmb();
        return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
        struct perf_event_mmap_page *pc = md->base;

        /*
         * ensure all reads are done before we write the tail out.
         */
        mb();
        pc->data_tail = tail;
}

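/*
 * The two helpers above implement the user-space side of the ring
 * buffer protocol: read_head() loads data_head with a read barrier
 * before the event data is read, and write_tail() publishes data_tail
 * only after all reads have completed.  A low-level consumer sketch
 * (illustrative only):
 *
 *	u64 head = perf_mmap__read_head(md);
 *	... copy events between md->prev and head out of md->base ...
 *	perf_mmap__write_tail(md, head);
 */
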
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
        list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
        __evlist__for_each_entry(&(evlist)->entries, evsel)

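/*
 * Typical use (sketch):
 *
 *	struct perf_evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel) {
 *		... look at evsel->attr, evsel->name, etc. ...
 *	}
 */
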
/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
        list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
        __evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
        list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
        __evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
        list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
        __evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)

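/*
 * The _safe variant tolerates removal of the current element, e.g.
 * (sketch):
 *
 *	struct perf_evsel *evsel, *tmp;
 *
 *	evlist__for_each_entry_safe(evlist, tmp, evsel) {
 *		if (... evsel is no longer wanted ...)
 *			perf_evlist__remove(evlist, evsel);
 *	}
 */
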
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                            union perf_event *event);
#endif /* __PERF_EVLIST_H */