/*
 * Accessor for the per-cpu/per-thread fd stored in the evsel's xyarray.
 * NOTE: function-like macro — evaluates 'e', 'x' and 'y' more than never;
 * don't pass expressions with side effects.
 */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
9 struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
11 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
15 evsel->attr.type = type;
16 evsel->attr.config = config;
17 INIT_LIST_HEAD(&evsel->node);
23 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
25 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
26 return evsel->fd != NULL ? 0 : -ENOMEM;
29 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
31 evsel->counts = zalloc((sizeof(*evsel->counts) +
32 (ncpus * sizeof(struct perf_counts_values))));
33 return evsel->counts != NULL ? 0 : -ENOMEM;
36 void perf_evsel__free_fd(struct perf_evsel *evsel)
38 xyarray__delete(evsel->fd);
42 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
46 for (cpu = 0; cpu < ncpus; cpu++)
47 for (thread = 0; thread < nthreads; ++thread) {
48 close(FD(evsel, cpu, thread));
49 FD(evsel, cpu, thread) = -1;
53 void perf_evsel__delete(struct perf_evsel *evsel)
55 assert(list_empty(&evsel->node));
56 xyarray__delete(evsel->fd);
60 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
61 int cpu, int thread, bool scale)
63 struct perf_counts_values count;
64 size_t nv = scale ? 3 : 1;
66 if (FD(evsel, cpu, thread) < 0)
69 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
72 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
78 else if (count.run < count.ena)
79 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
81 count.ena = count.run = 0;
83 evsel->counts->cpu[cpu] = count;
87 int __perf_evsel__read(struct perf_evsel *evsel,
88 int ncpus, int nthreads, bool scale)
90 size_t nv = scale ? 3 : 1;
92 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
96 for (cpu = 0; cpu < ncpus; cpu++) {
97 for (thread = 0; thread < nthreads; thread++) {
98 if (FD(evsel, cpu, thread) < 0)
101 if (readn(FD(evsel, cpu, thread),
102 &count, nv * sizeof(u64)) < 0)
105 aggr->val += count.val;
107 aggr->ena += count.ena;
108 aggr->run += count.run;
113 evsel->counts->scaled = 0;
115 if (aggr->run == 0) {
116 evsel->counts->scaled = -1;
121 if (aggr->run < aggr->ena) {
122 evsel->counts->scaled = 1;
123 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
126 aggr->ena = aggr->run = 0;
131 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
135 if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
138 for (cpu = 0; cpu < cpus->nr; cpu++) {
139 FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
140 cpus->map[cpu], -1, 0);
141 if (FD(evsel, cpu, 0) < 0)
149 close(FD(evsel, cpu, 0));
150 FD(evsel, cpu, 0) = -1;
155 int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
159 if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
162 for (thread = 0; thread < threads->nr; thread++) {
163 FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
164 threads->map[thread], -1, -1, 0);
165 if (FD(evsel, 0, thread) < 0)
172 while (--thread >= 0) {
173 close(FD(evsel, 0, thread));
174 FD(evsel, 0, thread) = -1;
179 int perf_evsel__open(struct perf_evsel *evsel,
180 struct cpu_map *cpus, struct thread_map *threads)
183 return perf_evsel__open_per_cpu(evsel, cpus);
185 return perf_evsel__open_per_thread(evsel, threads);