/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

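/*
 * Initialize an evlist: empty the entry list and the sample id hash
 * buckets, attach the cpu/thread maps and note that no workload has
 * been forked yet (pid == -1).
 */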
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        evlist->id_pos = first->id_pos;
        evlist->is_pos = first->is_pos;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
}

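/*
 * Splice a list of already created evsels onto the tail of the evlist.
 * If the evlist was empty, the id/is positions have to be (re)taken
 * from what is now the first entry.
 */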
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        bool set_id_pos = !evlist->nr_entries;

        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
        if (set_id_pos)
                perf_evlist__set_id_pos(evlist);
}

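/*
 * The first entry on the list becomes the group leader.  Evsels get
 * consecutive ->idx values as they are created, so the number of group
 * members is the idx distance between the first and the last entry,
 * inclusive.
 */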
void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
        }

        return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel;

        evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
        if (evsel == NULL)
                return -1;

        evsel->handler.func = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}

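/*
 * Only group leaders are toggled below: members are scheduled in and
 * out together with their leader, so poking the leader's fd with
 * PERF_EVENT_IOC_{DISABLE,ENABLE} is enough to stop/start the whole
 * group.
 */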
void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return 0;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_DISABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
                              struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return -EINVAL;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_ENABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

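/*
 * Each (evsel, cpu, thread) tuple gets a kernel assigned sample id.
 * The ids are kept in a hash table so that the id field of a sample
 * can be mapped back to the evsel that generated it.
 */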
static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

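/*
 * Get the event id for a just opened fd, preferring the
 * PERF_EVENT_IOC_ID ioctl and falling back, on kernels that lack it,
 * to parsing the id out of a read() of the counter.
 */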
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;

        /* Legacy way to get event id.. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid;

        return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        sid = perf_evlist__id2sid(evlist, id);
        if (sid)
                return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

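/*
 * Extract the sample id from a raw event: for PERF_RECORD_SAMPLE the
 * id is at a fixed offset from the start of the record (id_pos), while
 * for other record types the sample_id_all block is appended, so the
 * id is indexed from the end (is_pos).
 */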
static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
{
        const u64 *array = event->sample.array;
        ssize_t n;

        n = (event->header.size - sizeof(event->header)) >> 3;

        if (event->header.type == PERF_RECORD_SAMPLE) {
                if (evlist->id_pos >= n)
                        return -1;
                *id = array[evlist->id_pos];
        } else {
                if (evlist->is_pos > n)
                        return -1;
                n -= evlist->is_pos;
                *id = array[n];
        }
        return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                                   union perf_event *event)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;
        u64 id;

        if (evlist->nr_entries == 1)
                return first;

        if (!first->attr.sample_id_all &&
            event->header.type != PERF_RECORD_SAMPLE)
                return first;

        if (perf_evlist__event2id(evlist, event, &id))
                return NULL;

        /* Synthesized events have an id of zero */
        if (!id)
                return first;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node) {
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}

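/*
 * Read one event from the ring buffer at @idx.  In overwrite mode the
 * kernel keeps writing and may lap the reader, so detect that case and
 * restart from the current head.  In non-overwrite mode, consume from
 * the old tail and publish the new one so the kernel can reuse the
 * space.
 */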
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
        }
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++)
                __perf_evlist__munmap(evlist, i);

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

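/*
 * One ring buffer is mmapped per cpu (or per thread, below): the first
 * fd for that cpu gets the mmap, every other fd is redirected into it
 * with PERF_EVENT_IOC_SET_OUTPUT, so all events for a cpu land in a
 * single buffer.
 */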
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++)
                __perf_evlist__munmap(evlist, cpu);
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++)
                __perf_evlist__munmap(evlist, thread);
        return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return -EINVAL;

        mask = pages * page_size - 1;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__empty(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

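/*
 * Build the cpu and thread maps from the target specification: pure
 * per-task targets get a dummy cpu map, i.e. the task is followed
 * wherever it runs, otherwise a real cpu map is built from the
 * requested cpu list.
 */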
int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus = NULL;
        evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *pos;

        if (evlist->nr_entries == 1)
                return true;

        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;

        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
        }

        return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;

        list_for_each_entry(evsel, &evlist->entries, node)
                evlist->combined_sample_type |= evsel->attr.sample_type;

        return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        evlist->combined_sample_type = 0;
        return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (read_format != pos->attr.read_format)
                        return false;
        }

        /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
        if ((sample_type & PERF_SAMPLE_READ) &&
            !(read_format & PERF_FORMAT_ID)) {
                return false;
        }

        return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.read_format;
}

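/*
 * Size of the sample_id_all block the kernel appends to non-sample
 * records, computed from the first evsel's sample_type.
 */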
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                size += sizeof(data->id);
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int ncpus = cpu_map__nr(evlist->cpus);
        int nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        perf_evlist__close(evlist);
        errno = -err;
        return err;
}

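/*
 * Fork the workload but keep it "corked" on a pipe: the child signals
 * readiness by closing child_ready_pipe, then blocks in a read() of
 * go_pipe until perf_evlist__start_workload() writes to the other end,
 * so that all counters can be set up before the exec happens.
 */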
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_target *target,
                                  const char *argv[], bool pipe_output,
                                  bool want_signal)
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (pipe_output)
                        dup2(2, 1);

                signal(SIGTERM, SIG_DFL);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                if (want_signal)
                        kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (perf_target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
                int ret;
                /*
                 * Remove the cork, let it rip!
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
                        perror("unable to write to pipe");

                close(evlist->workload.cork_fd);
                return ret;
        }

        return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

        if (!evsel)
                return -EFAULT;
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}