 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * Released under the GPL v2. (and only v2, not any later version)
#include <api/fs/fs.h>
#include "thread_map.h"
#include "parse-events.h"
#include <subcmd/parse-options.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
struct perf_evlist *perf_evlist__new(void)
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
		perf_evlist__init(evlist, NULL, NULL);
struct perf_evlist *perf_evlist__new_default(void)
	struct perf_evlist *evlist = perf_evlist__new();
	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
struct perf_evlist *perf_evlist__new_dummy(void)
	struct perf_evlist *evlist = perf_evlist__new();
	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist);
	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);
	perf_evlist__set_id_pos(evlist);
static void perf_evlist__purge(struct perf_evlist *evlist)
	struct perf_evsel *pos, *n;
	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	evlist->nr_entries = 0;
void perf_evlist__exit(struct perf_evlist *evlist)
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
void perf_evlist__delete(struct perf_evlist *evlist)
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
	__perf_evlist__propagate_maps(evlist, entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
	struct perf_evsel *evsel, *temp;
	__evlist__for_each_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
void __perf_evlist__set_leader(struct list_head *list)
	struct perf_evsel *evsel, *leader;
	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);
	leader->nr_members = evsel->idx - leader->idx + 1;
	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
void perf_evlist__set_leader(struct perf_evlist *evlist)
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
	attr->precise_ip = 3;
	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);

		if (fd != -1) {
			close(fd);
			break;
		}

		attr->precise_ip--;
	}
int perf_evlist__add_default(struct perf_evlist *evlist)
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	struct perf_evsel *evsel;
	event_attr_init(&attr);
	perf_event_attr__set_max_precise_ip(&attr);
	evsel = perf_evsel__new(&attr);
	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
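	/*
	 * For illustration (not in the original source): the "%.*s"
	 * precision prints a prefix of ":ppp", so precise_ip == 3 names the
	 * event "cycles:ppp", precise_ip == 1 names it "cycles:p", and
	 * precise_ip == 0 leaves it as plain "cycles".
	 */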
	perf_evlist__add(evlist, evsel);
	perf_evsel__delete(evsel);
int perf_evlist__add_dummy(struct perf_evlist *evlist)
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size = sizeof(attr), /* to capture ABI version */
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	perf_evlist__add(evlist, evsel);
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
	struct perf_evsel *evsel, *n;
	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	perf_evlist__splice_list_tail(evlist, &head);
out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);
	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
	if (evsel->system_wide)
		return thread_map__nr(evlist->threads);
void perf_evlist__disable(struct perf_evlist *evlist)
	struct perf_evsel *pos;
	evlist__for_each(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
		perf_evsel__disable(pos);
	evlist->enabled = false;
void perf_evlist__enable(struct perf_evlist *evlist)
	struct perf_evsel *pos;
	evlist__for_each(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
		perf_evsel__enable(pos);
	evlist->enabled = true;
void perf_evlist__toggle_enable(struct perf_evlist *evlist)
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
	int nr_cpus = cpu_map__nr(evlist->cpus);
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus * nr_threads;
	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
		evlist->pollfd.priv[pos].idx = idx;
		fcntl(fd, F_SETFL, O_NONBLOCK);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
	return __perf_evlist__add_pollfd(evlist, fd, -1);
static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
	return fdarray__poll(&evlist->pollfd, timeout);
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	/* Legacy way to get event id.. All hail to old kernels! */
	 * This way does not work with group format read, so bail
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
	id = read_data[id_idx];
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
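	/*
	 * Layout sketch for the legacy path above, per perf_event_open(2)
	 * (added for illustration, not in the original source): with
	 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
	 * PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID, the read() fills
	 * read_data as
	 *
	 *	read_data[0] = counter value
	 *	read_data[1] = time_enabled
	 *	read_data[2] = time_running
	 *	read_data[3] = id
	 *
	 * which is why id_idx starts at 1 and is bumped once for each
	 * PERF_FORMAT_TOTAL_TIME_* flag before indexing the id.
	 */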
static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
	struct hlist_head *head;
	struct perf_sample_id *sid;
	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];
	hlist_for_each_entry(sid, head, node)
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
	struct perf_sample_id *sid;
	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);
	sid = perf_evlist__id2sid(evlist, id);
	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
	struct perf_sample_id *sid;
	sid = perf_evlist__id2sid(evlist, id);
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
	const u64 *array = event->sample.array;
	n = (event->header.size - sizeof(event->header)) >> 3;
	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
		*id = array[evlist->id_pos];
		if (evlist->is_pos > n)
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	if (evlist->nr_entries == 1)
	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
	if (perf_evlist__event2id(evlist, event, &id))
	/* Synthesized events have an id of zero */
	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];
	hlist_for_each_entry(sid, head, node) {
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->mmap[i].fd;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
int perf_evlist__pause(struct perf_evlist *evlist)
	return perf_evlist__set_paused(evlist, true);
int perf_evlist__resume(struct perf_evlist *evlist)
	return perf_evlist__set_paused(evlist, false);
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 * In either case, truncate and restart at 'end'.
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
			 * 'end' points to a known good entry, start there.
	if (diff >= (int)sizeof(event->header)) {
		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;
		if (size < sizeof(event->header) || diff < (int)size) {
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
			event = (union perf_event *) md->event_copy;
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
	struct perf_mmap *md = &evlist->mmap[idx];
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	if (!atomic_read(&md->refcnt))
	head = perf_mmap__read_head(md);
	return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
	struct perf_mmap *md = &evlist->mmap[idx];
	u64 start = md->prev;
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	if (!atomic_read(&md->refcnt))
	head = perf_mmap__read_head(md);
	 * The 'head' pointer starts at 0 and the kernel subtracts
	 * sizeof(record) from it on every write, so 'head' is effectively
	 * negative. The 'end' pointer is made manually by adding the size
	 * of the ring buffer to 'head', meaning the valid data we can read
	 * spans the whole ring buffer. If 'end' is positive, the ring
	 * buffer has not completely filled yet, so 'end' must be adjusted
	 * to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead, compare '-head' with
	 * the size of the ring buffer, where '-head' is the number of
	 * bytes the kernel has written to it.
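	 *
	 * Worked example for illustration (not in the original source):
	 * with a 4096-byte data area (mask == 4095), after the kernel has
	 * written 24 bytes backwards, head == (u64)-24, so -head == 24 and
	 * 'end' is clamped to 0: the valid records span [head, 0). Once
	 * 4096 or more bytes have been written, end == head + 4096, i.e.
	 * exactly one full ring behind 'head'.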
	if (-head < (u64)(md->mask + 1))
		end = head + md->mask + 1;
	return perf_mmap__read(md, false, start, end, &md->prev);
void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
	struct perf_mmap *md = &evlist->mmap[idx];
	if (!atomic_read(&md->refcnt))
	head = perf_mmap__read_head(md);
static bool perf_mmap__empty(struct perf_mmap *md)
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
	atomic_inc(&evlist->mmap[idx].refcnt);
static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
		__perf_evlist__munmap(evlist, idx);
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
	struct perf_mmap *md = &evlist->mmap[idx];
	if (!evlist->overwrite) {
		perf_mmap__write_tail(md, old);
	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		evlist->mmap[idx].fd = -1;
		atomic_set(&evlist->mmap[idx].refcnt, 0);
	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
void perf_evlist__munmap(struct perf_evlist *evlist)
	if (evlist->mmap == NULL)
	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);
	zfree(&evlist->mmap);
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (evlist->mmap == NULL)
		return -ENOMEM;
	for (i = 0; i < evlist->nr_mmaps; i++)
		evlist->mmap[i].fd = -1;
	return 0;
	struct auxtrace_mmap_params auxtrace_mp;
static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	atomic_set(&evlist->mmap[idx].refcnt, 2);
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
		evlist->mmap[idx].base = NULL;
	evlist->mmap[idx].fd = fd;
	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide && thread)
		fd = FD(evsel, cpu, thread);
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
			perf_evlist__mmap_get(evlist, idx);
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
	int nr_threads = thread_map__nr(evlist->threads);
	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
unsigned long perf_event_mlock_kb_in_pages(void)
	unsigned long pages;
	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet, as we have a reference value.
	max -= (page_size / 1024);
	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);
static size_t perf_evlist__mmap_size(unsigned long pages)
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
	return (pages + 1) * page_size;
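/*
 * For illustration (not in the original source): with 4 KiB pages and the
 * default kernel.perf_event_mlock_kb of 516 kB,
 * perf_event_mlock_kb_in_pages() yields (516 - 4) kB == 128 pages, so the
 * map ends up being (128 + 1) * 4096 bytes. The extra page in front of the
 * data area holds the kernel's control header (struct perf_event_mmap_page).
 */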
static long parse_pages_arg(const char *str, unsigned long min,
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		/* we got pages count value */
		pages = strtoul(str, &eptr, 10);
	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
	unsigned long max = UINT_MAX;
	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;
	pages = parse_pages_arg(str, 1, max);
		pr_err("Invalid argument for --mmap_pages/-m\n");
	*mmap_pages = pages;
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
	return __perf_evlist__parse_mmap_pages(opt->value, str);
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 * Return: %0 on success, negative error code otherwise.
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;
	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);
	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);
	return perf_evlist__mmap_per_cpu(evlist, &mp);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
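/*
 * A minimal usage sketch (added for illustration, not part of the original
 * file): a hypothetical caller wiring the pieces above together -- create an
 * evlist with the default cycles event, open and mmap it, then drain events,
 * pairing perf_evlist__mmap_read() with perf_evlist__mmap_consume() as the
 * comment above perf_evlist__mmap_ex() requires. Error handling abbreviated.
 */
#if 0 /* example only */
static int example_read_loop(struct target *target)
{
	struct perf_evlist *evlist = perf_evlist__new_default();
	union perf_event *event;
	int i;

	if (evlist == NULL)
		return -ENOMEM;

	if (perf_evlist__create_maps(evlist, target) < 0 ||
	    perf_evlist__open(evlist) < 0 ||
	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
		goto out_delete;

	perf_evlist__enable(evlist);

	while (perf_evlist__poll(evlist, 100) >= 0) {
		for (i = 0; i < evlist->nr_mmaps; i++) {
			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				/* decode with perf_evlist__parse_sample() here */
				perf_evlist__mmap_consume(evlist, i);
			}
		}
	}
out_delete:
	perf_evlist__delete(evlist);
	return 0;
}
#endif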
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
	struct cpu_map *cpus;
	struct thread_map *threads;
	threads = thread_map__new_str(target->pid, target->tid, target->uid);
	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
		cpus = cpu_map__new(target->cpu_list);
		goto out_delete_threads;
	evlist->has_user_cpus = !!target->cpu_list;
	perf_evlist__set_maps(evlist, cpus, threads);
	thread_map__put(threads);
void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it. Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1. If that is not the case it is up to
	 * the caller to increase the reference count.
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpu_map__get(cpus);
	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = thread_map__get(threads);
	perf_evlist__propagate_maps(evlist);
void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
	struct perf_evsel *evsel;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);
	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
		 * filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and evsel maps should always match.
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
		err = perf_evsel__set_filter(evsel, filter);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
	for (i = 0; i < npids; ++i) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
	ret = perf_evlist__set_filter(evlist, filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
	struct perf_evsel *pos;
	if (evlist->nr_entries == 1)
	if (evlist->id_pos < 0 || evlist->is_pos < 0)
	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;
	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;
	return evlist->combined_sample_type;
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	u64 branch_type = 0;
	evlist__for_each(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;
	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
u64 perf_evlist__read_format(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	if (!first->attr.sample_id_all)
	sample_type = first->attr.sample_type;
	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;
	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);
	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);
	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);
	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
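	/*
	 * For illustration (not in the original source): with sample_type
	 * including PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER,
	 * this adds 8 (pid + tid) + 8 (time) + 8 (identifier) = 24 bytes of
	 * sample_id_all trailer to every non-sample event.
	 */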
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
	evlist->selected = evsel;
void perf_evlist__close(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
	struct cpu_map *cpus;
	struct thread_map *threads;
	 * Try reading /sys/devices/system/cpu/online to get
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	cpus = cpu_map__new(NULL);
	threads = thread_map__new_dummy();
	perf_evlist__set_maps(evlist, cpus, threads);
int perf_evlist__open(struct perf_evlist *evlist)
	struct perf_evsel *evsel;
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
	perf_evlist__update_id_pos(evlist);
	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
	perf_evlist__close(evlist);
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
	int child_ready_pipe[2], go_pipe[2];
	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	if (!evlist->workload.pid) {
		signal(SIGTERM, SIG_DFL);
		close(child_ready_pipe[0]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
		 * Tell the parent we're ready to go
		close(child_ready_pipe[1]);
		 * Wait until the parent tells us to go.
		ret = read(go_pipe[0], &bf, 1);
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
				perror("unable to read pipe");
		execvp(argv[0], (char **)argv);
			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		sigaction(SIGUSR1, &act, NULL);
	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	close(child_ready_pipe[1]);
	 * wait for child to settle
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
int perf_evlist__start_workload(struct perf_evlist *evlist)
	if (evlist->workload.cork_fd > 0) {
		 * Remove the cork, let it rip!
		ret = write(evlist->workload.cork_fd, &bf, 1);
			perror("unable to write to pipe");
		close(evlist->workload.cork_fd);
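		/*
		 * Usage sketch (added for illustration, not in the original
		 * source): a hypothetical caller typically pairs these as
		 *
		 *	perf_evlist__prepare_workload(evlist, &target, argv,
		 *				      false, NULL);
		 *	perf_evlist__open(evlist);
		 *	perf_evlist__mmap(evlist, UINT_MAX, false);
		 *	perf_evlist__enable(evlist);
		 *	perf_evlist__start_workload(evlist);
		 *
		 * so the forked child stays blocked on the cork pipe until
		 * every event is set up, and no workload samples are lost
		 * before the measurement starts.
		 */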
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
	return perf_evsel__parse_sample(evsel, event, sample);
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	return printed + fprintf(fp, "\n");
int perf_evlist__strerror_open(struct perf_evlist *evlist,
			       int err, char *buf, size_t size)
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
		printed = scnprintf(buf, size,
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
		value = perf_event_paranoid();
		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
			printed += scnprintf(buf + printed, size - printed,
					     "For system wide tracing it needs to be set to -1.\n");
		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		struct perf_evsel *first = perf_evlist__first(evlist);
		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
		if (first->attr.sample_freq < (u64)max_freq)
		printed = scnprintf(buf, size,
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->attr.sample_freq);
		scnprintf(buf, size, "%s", emsg);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);
		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		scnprintf(buf, size, "%s", emsg);
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
	struct perf_evsel *evsel, *n;
	if (move_evsel == perf_evlist__first(evlist))
	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	list_splice(&move, &evlist->entries);
void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
	struct perf_evsel *evsel;
	if (tracking_evsel->tracking)
	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	tracking_evsel->tracking = true;
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
	struct perf_evsel *evsel;
	evlist__for_each(evlist, evsel) {
		if (strcmp(str, evsel->name) == 0)