/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
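/*
 * Rough sketch of the typical evlist lifecycle (error handling elided;
 * the exact sequence varies per tool, see builtin-record.c for a real
 * user):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);		// "cycles" event
 *	perf_evlist__create_maps(evlist, &target);	// cpu + thread maps
 *	perf_evlist__open(evlist);			// the syscalls
 *	perf_evlist__mmap(evlist, UINT_MAX, false);	// ring buffers
 *	...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */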
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
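/*
 * Sketch of adding a tracepoint event with a handler; the
 * "sched"/"sched_switch" names and process_switch() are only an
 * illustration, dispatch of handler.func is up to the tool:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_switch) < 0)
 *		return -1;
 */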
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
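/*
 * The pollfd array filled above is meant to be handed straight to
 * poll(2); a consumer loop might look like this sketch:
 *
 *	while (!done) {
 *		...drain events via perf_evlist__mmap_read()...
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *	}
 */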
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
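/*
 * Why id_idx starts at 1 and is bumped once per TOTAL_TIME_* bit: for a
 * non-group event the legacy read() above returns, with read_format =
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING:
 *
 *	read_data[0] = value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id	(id_idx ends up == 3)
 */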
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
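/*
 * Worked example of the indexing above (assuming sample_type includes
 * PERF_SAMPLE_IDENTIFIER): in a PERF_RECORD_SAMPLE the id is the first
 * u64 after the header, so id_pos == 0 and array[0] is read; in every
 * other record type the id is the last u64 of the sample_id trailer, so
 * is_pos == 1 (counted from the end) and array[n - 1] is read after the
 * "n -= evlist->is_pos" above.
 */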
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
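/*
 * Sketch of a consumer built on the reader above (deliver() is a
 * caller-supplied placeholder):
 *
 *	union perf_event *event;
 *	struct perf_sample sample;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			deliver(event, &sample);
 *	}
 */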
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
				goto out_unmap;

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
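/*
 * Concrete numbers for the sizing above, assuming 4 kiB pages and
 * pages == 128: mask = 128 * 4096 - 1 = 0x7ffff, and mmap_len =
 * (128 + 1) * 4096 -- the extra page up front is the perf_event_mmap_page
 * control page, the remaining 128 pages hold the ring buffer data.
 */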
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
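/*
 * Usage sketch: both filter setters take the kernel's tracepoint filter
 * language; e.g. to restrict every event in the list to non-idle tasks:
 *
 *	perf_evlist__set_filter(evlist, "common_pid != 0");
 */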
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
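/*
 * Worked example for the sizing above: with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID,
 * each non-sample event carries a trailer of
 * 2 * sizeof(u32) (pid, tid) + sizeof(u64) (time) + sizeof(u64) (id)
 * = 24 bytes.
 */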
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/* Tell the parent we're ready to go */
		close(child_ready_pipe[1]);

		/* Wait until the parent tells us to go. */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		if (want_signal) {
			perror(argv[0]);
			kill(getppid(), SIGUSR1);
		}
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);

	/* wait for child to settle */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;

		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
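/*
 * The two functions above implement a fork()/exec() "cork": the child
 * blocks in read(go_pipe[0], ...) until the parent writes one byte. A
 * caller sketch (error handling elided, see builtin-record.c for a real
 * user):
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	...mmap, enable counters...
 *	perf_evlist__start_workload(evlist);	<- child exec()s here
 */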
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}