/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx  = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}
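/*
 * Illustrative usage (not part of the original file): perf_evsel__new()
 * is just zalloc() + perf_evsel__init(), so a caller only needs a filled
 * perf_event_attr. A minimal sketch, inside some hypothetical caller:
 */
#if 0
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;
#endif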
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}
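/*
 * Illustrative note (not from the original): the single zalloc() above
 * relies on perf_counts ending in a flexible array member, so the header
 * and the ncpus per-cpu slots live in one allocation. A sketch of the
 * assumed layout, following the fields this file touches:
 */
#if 0
struct perf_counts {
	s8			  scaled;
	struct perf_counts_values aggr;
	struct perf_counts_values cpu[];	/* ncpus entries */
};
#endif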
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel);
}
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
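/*
 * Worked example for the scaling above (illustrative numbers): if the
 * counter was multiplexed so that ena = 200 time units enabled but only
 * run = 100 actually counting, a raw val = 1000 is extrapolated to
 * 1000 * 200 / 100 + 0.5 -> 2000, i.e. val * ena / run rounded to nearest.
 */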
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/* roll back every fd opened so far */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
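/*
 * Illustrative note (not from the original): perf_event_open()'s grouping
 * protocol is that the first event opened with group_fd == -1 becomes the
 * group leader on that cpu, and its fd is passed as group_fd for the rest
 * so the kernel schedules them as one unit, e.g. (hypothetical attrs):
 */
#if 0
	int leader = sys_perf_event_open(&cycles_attr, pid, cpu, -1, 0);
	int member = sys_perf_event_open(&instrs_attr, pid, cpu, leader, 0);
#endif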
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}
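/*
 * Illustrative lifecycle (not part of the original file) tying the open,
 * read and teardown helpers together; "threads" is assumed to come from
 * thread_map__new() elsewhere in perf, and error handling is elided:
 */
#if 0
	if (perf_evsel__open_per_thread(evsel, threads, false) < 0)
		goto out;
	__perf_evsel__read_on_cpu(evsel, 0, 0, true);	/* scaled read, cpu 0 */
	perf_evsel__close_fd(evsel, 1, threads->nr);
out:
	perf_evsel__delete(evsel);
#endif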
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	/* start at the last u64 of the record and walk backwards */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
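/*
 * Illustrative note (not from the original): with attr.sample_id_all set,
 * the kernel appends the selected sample_id fields to the *end* of every
 * non-sample record, in this order:
 *
 *	{ u32 pid, tid;  }	if PERF_SAMPLE_TID
 *	{ u64 time;      }	if PERF_SAMPLE_TIME
 *	{ u64 id;        }	if PERF_SAMPLE_ID
 *	{ u64 stream_id; }	if PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	if PERF_SAMPLE_CPU
 *
 * which is why the parser above starts at the last u64 of the record and
 * peels the fields off in reverse order.
 */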
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;
	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}
	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = *p;
		p++;

		if (sample_overlap(event, p, data->raw_size))
			return -EFAULT;

		data->raw_data = p;
	}

	return 0;
}
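/*
 * Illustrative note (not from the original): PERF_SAMPLE_RAW payloads are
 * a u32 size followed by that many bytes of data, hence the u32 pointer
 * walk above. Every length used in this parser comes from the event
 * stream itself, which is why sample_overlap() guards each dereference
 * against records whose header.size understates the payload.
 */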