/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
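/*
 * Shorthands to index the per-event fd and sample_id xyarrays by
 * (cpu, thread) coordinates.
 */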
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
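/*
 * Only group leaders are toggled below: group members are scheduled
 * in and out together with their leader, so issuing the ioctl on the
 * leader is enough to stop or restart the whole group.
 */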
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
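/*
 * Resolve the kernel-assigned id for an event fd: preferably via the
 * PERF_EVENT_IOC_ID ioctl, falling back to deriving it from a read()
 * of the counter on older kernels that lack the ioctl.
 */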
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
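/*
 * Extract the sample id from a raw event using the precomputed
 * positions: id_pos indexes from the front of a PERF_RECORD_SAMPLE,
 * is_pos from the end of any other record type.
 */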
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
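/*
 * Read one event from the ring buffer at mmap index idx. An event
 * that straddles the buffer boundary is reassembled into
 * md->event_copy; in non-overwrite mode the tail pointer is advanced
 * to signal consumption to the kernel.
 */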
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
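/*
 * One ring buffer per CPU: the first event fd on each CPU owns the
 * mmap, and every other event on that CPU is redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT.
 */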
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) {
					goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
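/*
 * Same scheme as above, but with one ring buffer per thread, used
 * when there is no cpu map (e.g. per-task monitoring).
 */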
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0) {
				goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
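/*
 * The helpers below check that every event in the list agrees on the
 * attributes (sample type positions, read format, sample_id_all) that
 * must be uniform for sample parsing to work.
 */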
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
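/*
 * Fork the workload but keep it "corked": the child blocks on a pipe
 * read until perf_evlist__start_workload() writes to the other end,
 * so counters can be set up before the workload starts executing.
 */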
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}