return evlist->mmap != NULL ? 0 : -ENOMEM;
}

-static int __perf_evlist__mmap(struct perf_evlist *evlist,
- int idx, int prot, int mask, int fd)
+struct mmap_params {
+ int prot;
+ int mask;
+};
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
+ struct mmap_params *mp, int fd)
{
evlist->mmap[idx].prev = 0;
- evlist->mmap[idx].mask = mask;
- evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+ evlist->mmap[idx].mask = mp->mask;
+ evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
MAP_SHARED, fd, 0);
if (evlist->mmap[idx].base == MAP_FAILED) {
pr_debug2("failed to mmap perf event ring buffer, error %d\n",
@@ ... @@
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
- int prot, int mask, int cpu, int thread,
- int *output)
+ struct mmap_params *mp, int cpu,
+ int thread, int *output)
{
struct perf_evsel *evsel;
@@ ... @@
if (*output == -1) {
*output = fd;
- if (__perf_evlist__mmap(evlist, idx, prot, mask,
- *output) < 0)
+ if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
return -1;
} else {
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ ... @@
return 0;
}

-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
- int mask)
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
+ struct mmap_params *mp)
{
int cpu, thread;
int nr_cpus = cpu_map__nr(evlist->cpus);
@@ ... @@
int output = -1;

for (thread = 0; thread < nr_threads; thread++) {
- if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
- cpu, thread, &output))
+ if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
+ thread, &output))
goto out_unmap;
}
}
@@ ... @@
return -1;
}

-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
- int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
+ struct mmap_params *mp)
{
int thread;
int nr_threads = thread_map__nr(evlist->threads);
@@ ... @@
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;

- if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
- thread, &output))
+ if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
+ &output))
goto out_unmap;
}
@@ ... @@
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+ struct mmap_params mp = {
+ .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
+ };

if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;

evlist->overwrite = overwrite;
evlist->mmap_len = perf_evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->mmap_len);
- mask = evlist->mmap_len - page_size - 1;
+ mp.mask = evlist->mmap_len - page_size - 1;

evlist__for_each(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ ... @@
}

if (cpu_map__empty(cpus))
- return perf_evlist__mmap_per_thread(evlist, prot, mask);
+ return perf_evlist__mmap_per_thread(evlist, &mp);

- return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+ return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
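
Illustration, not part of the patch: a minimal, self-contained C sketch of the two ideas the change relies on. All demo_* names are invented; only struct mmap_params and its prot/mask fields mirror the code above. It shows why bundling the mmap parameters into one struct keeps call-chain signatures stable as the parameter list grows, and how a mask computed as mmap_len - page_size - 1 wraps a growing ring-buffer offset when the data area (everything after the header page) is a power of two.

#include <stdio.h>
#include <sys/mman.h>

/* Mirrors struct mmap_params from the patch; demo_* names are hypothetical. */
struct demo_mmap_params {
	int prot;
	int mask;
};

/* One struct pointer instead of a growing list of scalar arguments. */
static void demo_show(const struct demo_mmap_params *mp)
{
	printf("prot=%#x mask=%#x\n", mp->prot, mp->mask);
}

int main(void)
{
	size_t page_size = 4096;		/* stand-in for the real page size */
	size_t mmap_len = (1 + 8) * page_size;	/* header page + 8 data pages */
	struct demo_mmap_params mp = {
		.prot = PROT_READ | PROT_WRITE,
	};

	/*
	 * The data area (mmap_len minus the header page) is a power of two,
	 * so "offset & mask" reduces a monotonically growing offset to a
	 * position inside the buffer, like mp.mask is used above.
	 */
	mp.mask = (int)(mmap_len - page_size - 1);

	unsigned int head = 70000;	/* e.g. a ring-buffer head offset */
	printf("wrapped index = %u\n", head & (unsigned int)mp.mask);

	demo_show(&mp);
	return 0;
}

The design choice is the usual one for growing parameter lists: the caller fills the struct once (here, .prot at initialization and .mask after the mmap length is known) and threads a single pointer through the per-cpu, per-thread, and per-evsel helpers, so adding a field later does not touch every signature in the chain.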