tools/perf/util/ordered-events.c
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

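/*
 * Minimal usage sketch (illustrative only; "deliver_event" is a
 * hypothetical callback name, and the ordered_events__deliver_t
 * signature is inferred from how oe->deliver is invoked below):
 *
 *	static int deliver_event(struct ordered_events *oe,
 *				 struct ordered_event *event)
 *	{
 *		... consume event->event, stamped with event->timestamp ...
 *		return 0;
 *	}
 *
 *	struct ordered_events oe;
 *
 *	ordered_events__init(&oe, deliver_event);
 *	ordered_events__queue(&oe, event, sample, file_offset);
 *	...
 *	ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *	ordered_events__free(&oe);
 */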
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

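/*
 * Insert 'new' into the time-ordered event list. The search starts from
 * oe->last (the previously queued event) and walks forward or backward
 * depending on the new timestamp, on the expectation that consecutive
 * events are close in time.
 */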
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * last event might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}

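/*
 * When copy_on_queue is set, queued events are duplicated because the
 * caller's buffer may be reused before the event is delivered.
 * __dup_event() refuses to copy once cur_alloc_size reaches the
 * max_alloc_size limit, which makes ordered_events__queue() fall back
 * to flushing half of the queue and retrying.
 */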
static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}

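/*
 * Get a free ordered_event slot: reuse one from the cache of deleted
 * events, carve one out of the current buffer, or allocate a fresh
 * buffer of MAX_SAMPLE_BUFFER entries while still under max_alloc_size.
 */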
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/*
		 * The first entry is abused to maintain the to_free list;
		 * clear its event pointer so free_dup_event() is not handed
		 * uninitialized memory on teardown.
		 */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* No slot available: drop the duplicated event, don't deref NULL. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}

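/*
 * Allocate an ordered_event for 'event', stamp it and insert it into the
 * time-ordered list. Returns NULL when allocation fails or the memory
 * limit is hit, so the caller can flush and retry.
 */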
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

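/*
 * Remove a delivered event from the list and park its slot on the cache
 * list for reuse; the duplicated event data (if any) is freed here.
 */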
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
	event->event = NULL;
}

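/*
 * Queue one event for ordered delivery. Events without a sane timestamp
 * are rejected with -ETIME; events older than the last flush point are
 * counted as unordered but still queued. When allocation hits the memory
 * limit, half of the queue is flushed and the allocation retried once.
 */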
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset)
{
	u64 timestamp = sample->time;
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}

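/*
 * Deliver all queued events with a timestamp up to oe->next_flush, moving
 * their slots onto the cache as we go, and reposition oe->last if the
 * event it pointed to was flushed away. A ULLONG_MAX limit (final flush)
 * also drives a UI progress bar.
 */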
static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	if (show_progress)
		ui_progress__finish();

	return 0;
}

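/*
 * Flush the queue according to 'how': FINAL delivers everything, HALF
 * delivers the older half of the queued time range (used when the memory
 * limit is hit), and ROUND delivers up to the previously recorded
 * next_flush, then advances next_flush to max_timestamp.
 */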
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}

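/*
 * Initialize the queue with an empty event list, an empty reuse cache
 * and an effectively unlimited allocation budget ((u64)-1).
 */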
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
}

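/*
 * Walk the to_free list and release each event buffer. Each list node is
 * the (abused) first entry of a malloc'ed buffer, so free()ing it
 * releases the whole MAX_SAMPLE_BUFFER-sized block.
 */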
void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}

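/*
 * Throw away all buffered events and start from scratch, keeping only
 * the deliver callback across the reset.
 */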
void ordered_events__reinit(struct ordered_events *oe)
{
	ordered_events__deliver_t old_deliver = oe->deliver;

	ordered_events__free(oe);
	memset(oe, '\0', sizeof(*oe));
	ordered_events__init(oe, old_deliver);
}