/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
#include <signal.h>
#include <unistd.h>
#include <fcntl.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
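
/*
 * Illustrative sketch (not from this file): the usual constructor /
 * destructor pairing. perf_evlist__new_default() already adds the
 * cycles event, so only error handling remains:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	// ... open, mmap, read events ...
 *	perf_evlist__delete(evlist);
 */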
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so keep it
	 * if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
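
/*
 * Illustration (event names assumed): for a group built from
 * "{cycles,instructions,branches}", the first entry (cycles, idx 0)
 * becomes every member's leader, and since the last entry has idx 2,
 * leader->nr_members ends up as 2 - 0 + 1 = 3.
 */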
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}

		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture the ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
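
/*
 * Usage sketch (caller and attr values are illustrative, not from this
 * file): tools pass a static array, normalized by event_attr_init()
 * above, via the perf_evlist__add_default_attrs() wrapper macro from
 * evlist.h:
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, default_attrs) < 0)
 *		return -1;
 */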
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
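
/*
 * Usage sketch (process_sched_switch is a hypothetical caller-supplied
 * callback, stored in evsel->handler):
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */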
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);

		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the map so that when we filter out POLLHUP'ed fds we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... all hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
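
/*
 * Worked example for the legacy path above (read_format assumed): with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID, the
 * read() fills read_data as { value, time_enabled, id }, id_idx is
 * bumped once from 1 to 2, and the id is read_data[2]. With both
 * TOTAL_TIME flags set it would be read_data[3], which is why
 * read_data has four entries.
 */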
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}
union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}
union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * 'head' starts at 0 and the kernel subtracts sizeof(record) from
	 * it each time it writes a record, so 'head' is in fact negative.
	 * The 'end' pointer is made manually by adding the size of the
	 * ring buffer to 'head', meaning that the valid data that can be
	 * read spans the whole ring buffer. If 'end' is positive, the ring
	 * buffer has not been fully filled, so 'end' must be adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead, compare '-head' with
	 * the size of the ring buffer: -head is the number of bytes the
	 * kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}
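
/*
 * Numeric illustration of the above (values assumed): for a ring buffer
 * of md->mask + 1 = 4096 bytes to which the kernel has written 512
 * bytes, head == (u64)-512, so -head == 512 < 4096 and 'end' is clamped
 * to 0: only the 512 bytes between 'head' and 0 hold valid records.
 * Once the kernel has wrapped, -head >= 4096 and 'end' becomes
 * head + 4096, exposing one full buffer's worth of data starting at
 * 'head'.
 */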
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messup is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * that case. There is no need for it with a read-write ring
	 * buffer: the kernel stops outputting when it hits md->prev
	 * (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup with a backward ring buffer: we can
	 * always read arbitrarily long data from a backward ring buffer,
	 * unless we forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}
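
/*
 * Canonical consumption loop (illustrative sketch, 'idx' is the mmap
 * index): events must be consumed after use so that, for non-overwrite
 * buffers, the kernel sees the tail move forward:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		// ... process the event, e.g. perf_evlist__parse_sample() ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */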
void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!refcount_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);

	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}
void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}
static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

struct mmap_params {
	int prot, mask;
	struct auxtrace_mmap_params auxtrace_mp;
};
static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}
static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to always be
		 * opened without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet, as this is basically a debugging message.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}
size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
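
/*
 * Worked example (4096-byte pages assumed): pages == 128 yields
 * (128 + 1) * 4096 = 528384 bytes. The extra page holds the kernel's
 * perf_event_mmap_page control data that precedes the data pages,
 * which is why the data area itself must be a power of two.
 */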
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}
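
/*
 * Examples for the parsing above (4096-byte pages assumed): "-m 512K"
 * parses as a size and becomes 128 pages; "-m 100" parses as a page
 * count and is rounded up to the next power of two, 128, with a
 * "rounding mmap pages size to 512K (128 pages)" notice; "-m 0" is
 * rejected, since zero pages cannot be rounded to a power of two.
 */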
/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}
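
/*
 * Typical call sequence (illustrative sketch, error handling elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *
 * Passing UINT_MAX pages means "use the perf_event_mlock_kb default",
 * see perf_evlist__mmap_size() above.
 */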
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);
	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed, i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel maps should always match.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}
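
/*
 * Example of the filter built above (pids assumed): for npids == 2 with
 * pids { 1, 2 }, every tracepoint evsel gets the filter
 * "common_pid != 1 && common_pid != 2", e.g. so a tool can avoid
 * sampling its own processes.
 */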
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
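
/*
 * Worked example (sample_type assumed): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_IDENTIFIER, the id header adds 2 * 4 (pid/tid) +
 * 8 (time) + 8 (identifier) = 24 bytes to every non-sample event.
 */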
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	evlist__for_each_entry_reverse(evlist, evsel) {
		int n = evsel->cpus ? evsel->cpus->nr : ncpus;

		perf_evsel__close(evsel, n, nthreads);
	}
}
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * the set of online cpus.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		return -ENOMEM;

	threads = thread_map__new_dummy();
	if (!threads) {
		cpu_map__put(cpus);
		return -ENOMEM;
	}

	perf_evlist__set_maps(evlist, cpus, threads);
	return 0;
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;

		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
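
/*
 * Usage sketch (illustrative): the cork protocol above forks the
 * workload early but execs it late, so counters can be set up to
 * measure the workload from its first instruction:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	// ... perf_evlist__open(), perf_evlist__mmap(), enable ...
 *	perf_evlist__start_workload(evlist);
 */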
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct perf_evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->backward_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
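
/*
 * Summary of the legal transitions above, derived from the code:
 *
 *	NOTREADY --> RUNNING --(pause)--> DATA_PENDING --> EMPTY
 *	                ^                                    |
 *	                +-------------(resume)---------------+
 *
 * i.e. backward mmaps are paused while their pending data is collected
 * and resumed once it has been fully consumed.
 */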