/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!atomic_read(&md->refcnt))
+ if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);

/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
- if (!atomic_read(&md->refcnt))
+ if (!refcount_read(&md->refcnt))
return NULL;
head = perf_mmap__read_head(md);
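
Both hunks above make the same one-line substitution; the guard appears twice because both the forward and the backward mmap-read paths apparently carry it. refcount_read() can stand in for atomic_read() here because it is a plain load of the underlying counter. A minimal sketch of the helper, assuming it mirrors the kernel's refcount API:

/*
 * Sketch (assumption: matches the kernel's refcount.h): refcount_read()
 * is just an atomic load, so the POLLHUP/POLLERR guard behaves exactly
 * as it did with atomic_read().
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
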
{
u64 head;
- if (!atomic_read(&md->refcnt))
+ if (!refcount_read(&md->refcnt))
return;
head = perf_mmap__read_head(md);

static void perf_mmap__get(struct perf_mmap *map)
{
- atomic_inc(&map->refcnt);
+ refcount_inc(&map->refcnt);
}
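
The conversion pays off in the increment path: where atomic_inc() silently wraps, refcount_inc() detects increments on a dead object and saturates instead of overflowing. A simplified sketch of the assumed semantics:

/*
 * Simplified sketch (after the kernel's refcount API, assumed): an
 * increment on a counter that already hit 0 is a use-after-free, and a
 * saturated counter stays saturated rather than wrapping to 0.
 */
static inline void refcount_inc(refcount_t *r)
{
	/* refcount_inc_not_zero() returns false when refs == 0 */
	if (!refcount_inc_not_zero(r))
		WARN_ONCE(1, "refcount_t: increment on 0; use-after-free.\n");
}
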
static void perf_mmap__put(struct perf_mmap *md)
{
- BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
+ BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
- if (atomic_dec_and_test(&md->refcnt))
+ if (refcount_dec_and_test(&md->refcnt))
perf_mmap__munmap(md);
}
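
On the put side, refcount_dec_and_test() keeps the existing contract: exactly one caller sees it return true and performs the teardown. The difference from atomic_dec_and_test() is the failure mode, which the old BUG_ON() could only spot while md->base was still set. A simplified sketch (assumed; the real helper also saturates instead of going negative):

static inline bool refcount_dec_and_test(refcount_t *r)
{
	unsigned int old = atomic_fetch_sub(1, &r->refs);

	/* underflow means someone dropped a reference they never held */
	WARN_ONCE(old == 0, "refcount_t: underflow; use-after-free.\n");
	return old == 1;	/* true only for the final reference */
}
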
perf_mmap__write_tail(md, old);
}
- if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
+ if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
perf_mmap__put(md);
}
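
Under the new type the "== 1" test reads naturally: the count is down to the single extra reference taken at mmap time (see the comment in the allocation hunk below), every real user having already dropped theirs, so once the buffer is drained the consumer performs the final put. The full sequence is traced in the lifecycle sketch after the allocation hunk.
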
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
- atomic_set(&map->refcnt, 0);
+ refcount_set(&map->refcnt, 0);
}
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
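
perf_mmap__munmap() re-arms the counter with refcount_set(), which, like refcount_read(), is assumed to be an unchecked plain store; that is what permits the 0 -> 2 transition in perf_mmap__mmap() further down, a transition refcount_inc() by itself would (deliberately) refuse. Sketch of the assumed helper:

static inline void refcount_set(refcount_t *r, unsigned int n)
{
	atomic_set(&r->refs, n);	/* plain store, no sanity checking */
}
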
if (!map)
return NULL;
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->nr_mmaps; i++) {
map[i].fd = -1;
+ /*
+ * When the perf_mmap() call is made we grab one refcount, plus
+ * one extra to let perf_evlist__mmap_consume() get the last
+ * events after all real references (perf_mmap__get()) are
+ * dropped.
+ *
+ * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
+ * thus does perf_mmap__get() on it.
+ */
+ refcount_set(&map[i].refcnt, 0);
+ }
return map;
}
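
Pulling the comment's scheme together, a hypothetical walkthrough of the counter across an mmap's lifetime (the function and the ordering are illustrative, inferred from the comments in this patch rather than shown in the hunks):

/* Hypothetical walkthrough; values track one plausible sequence. */
static void refcnt_lifecycle_sketch(struct perf_mmap *map)
{
	refcount_set(&map->refcnt, 2);	/* perf_mmap__mmap(): evlist + consume */
	perf_mmap__get(map);		/* a PERF_EVENT_IOC_SET_OUTPUT user: 3 */
	perf_mmap__put(map);		/* that user drops its reference: 2 */
	perf_mmap__put(map);		/* perf_evlist__filter_pollfd(): 1 */
	/*
	 * perf_evlist__mmap_consume() now sees refcnt == 1; once the
	 * ring buffer is empty it issues the final put: 0 -> munmap.
	 */
}
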
* evlist layer can't just drop it when filtering events in
* perf_evlist__filter_pollfd().
*/
- atomic_set(&map->refcnt, 2);
+ refcount_set(&map->refcnt, 2);
map->prev = 0;
map->mask = mp->mask;
map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,