#define PCPU_REF_DYING 2
#define PCPU_REF_DEAD 3
-#define REF_STATUS(count) ((unsigned long) count & PCPU_STATUS_MASK)
+/* Extract the status bits from a packed ref->pcpu_count word. */
+#define REF_STATUS(count) ((count) & PCPU_STATUS_MASK)
void percpu_ref_init(struct percpu_ref *ref)
{
now <<= PCPU_STATUS_BITS;
now |= PCPU_REF_NONE;
- ref->pcpu_count = (void *) now;
+ /* pcpu_count is a packed integer now: the jiffies timestamp in the
+ * high bits, the status code (PCPU_REF_NONE) in the low
+ * PCPU_STATUS_BITS - no pointer cast needed any more.
+ */
+ ref->pcpu_count = now;
}
+/*
+ * Try to switch @ref to per-cpu counting.  @pcpu_count is the packed
+ * (timestamp | status) word previously loaded from ref->pcpu_count;
+ * it doubles as the cmpxchg old value below.  Called with
+ * rcu_read_lock() held.
+ */
-static void percpu_ref_alloc(struct percpu_ref *ref, unsigned __user *pcpu_count)
+static void percpu_ref_alloc(struct percpu_ref *ref, unsigned long pcpu_count)
{
- unsigned __percpu *new;
- unsigned long last = (unsigned long) pcpu_count;
- unsigned long now = jiffies;
+ unsigned long new, now = jiffies;
now <<= PCPU_STATUS_BITS;
now |= PCPU_REF_NONE;
+ /* Timestamps are compared in the shifted domain, hence the shifted
+ * HZ: allocate only if the stored stamp is under HZ jiffies old.
+ */
- if (now - last <= HZ << PCPU_STATUS_BITS) {
+ if (now - pcpu_count <= HZ << PCPU_STATUS_BITS) {
+ /* alloc_percpu() may sleep - drop the RCU read lock around it */
rcu_read_unlock();
- new = alloc_percpu(unsigned);
+ new = (unsigned long) alloc_percpu(unsigned);
rcu_read_lock();
if (!new)
goto update_time;
+ /* The low bits of the word encode the status; a real percpu
+ * pointer must have them clear or REF_STATUS() could not tell
+ * the two apart.
+ */
- BUG_ON(((unsigned long) new) & PCPU_STATUS_MASK);
+ BUG_ON(new & PCPU_STATUS_MASK);
+ /* If another updater changed pcpu_count meanwhile, ours loses */
if (cmpxchg(&ref->pcpu_count, pcpu_count, new) != pcpu_count)
- free_percpu(new);
+ free_percpu((void __percpu *) new);
else
pr_debug("created");
} else {
+ /* Too stale to bother allocating: just refresh the timestamp */
-update_time: new = (void *) now;
+update_time: new = now;
cmpxchg(&ref->pcpu_count, pcpu_count, new);
}
}
void __percpu_ref_get(struct percpu_ref *ref, bool alloc)
{
- unsigned __percpu *pcpu_count;
+ unsigned long pcpu_count;
uint64_t v;
+ /* ref->pcpu_count is a plain integer now, so rcu_dereference() no
+ * longer applies; ACCESS_ONCE() plus the dependency barrier below
+ * stand in for it.
+ */
- pcpu_count = rcu_dereference(ref->pcpu_count);
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
if (REF_STATUS(pcpu_count) == PCPU_REF_PTR) {
+ /* Status says the word is really a live percpu pointer */
- __this_cpu_inc(*pcpu_count);
+ /* for rcu - we're not using rcu_dereference() */
+ smp_read_barrier_depends();
+ __this_cpu_inc(*((unsigned __percpu *) pcpu_count));
} else {
+ /* No percpu counters - account in the shared atomic64 word */
v = atomic64_add_return(1 + (1ULL << PCPU_COUNT_BITS),
&ref->count);
int percpu_ref_put(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long pcpu_count;
uint64_t v;
int ret = 0;
rcu_read_lock();
+ /* Integer load instead of rcu_dereference(): the field is no longer
+ * a pointer; the dependency barrier below preserves the ordering for
+ * the PCPU_REF_PTR case.
+ */
- pcpu_count = rcu_dereference(ref->pcpu_count);
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
switch (REF_STATUS(pcpu_count)) {
case PCPU_REF_PTR:
+ /* Status says the word is really a live percpu pointer */
- __this_cpu_dec(*pcpu_count);
+ /* for rcu - we're not using rcu_dereference() */
+ smp_read_barrier_depends();
+ __this_cpu_dec(*((unsigned __percpu *) pcpu_count));
break;
case PCPU_REF_NONE:
case PCPU_REF_DYING:
int percpu_ref_kill(struct percpu_ref *ref)
{
- unsigned __percpu *old, *new, *pcpu_count = ref->pcpu_count;
- unsigned long status;
+ unsigned long old, new, status, pcpu_count;
+
+ /* NOTE(review): single racy snapshot of the packed word; presumably
+ * the do-loop's cmpxchg (not visible in this hunk) re-reads it on a
+ * lost race - verify against the elided loop tail.
+ */
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
do {
status = REF_STATUS(pcpu_count);
switch (status) {
case PCPU_REF_PTR:
+ /* Percpu counters exist - mark DYING so they get drained */
- new = (void *) PCPU_REF_DYING;
+ new = PCPU_REF_DYING;
break;
case PCPU_REF_NONE:
+ /* No percpu state to tear down - go straight to DEAD */
- new = (void *) PCPU_REF_DEAD;
+ new = PCPU_REF_DEAD;
break;
case PCPU_REF_DYING:
case PCPU_REF_DEAD:
synchronize_rcu();
for_each_possible_cpu(cpu)
- count += *per_cpu_ptr(pcpu_count, cpu);
+ count += *per_cpu_ptr((unsigned __percpu *) pcpu_count, cpu);
pr_debug("global %lli pcpu %i",
atomic64_read(&ref->count) & PCPU_COUNT_MASK,
atomic64_add((int) count, &ref->count);
smp_wmb();
/* Between setting global count and setting PCPU_REF_DEAD */
- ref->pcpu_count = (void *) PCPU_REF_DEAD;
+ ref->pcpu_count = PCPU_REF_DEAD;
+ /* Counters are folded into ref->count and DEAD is published,
+ * so the percpu allocation can be released now.
+ */
- free_percpu(pcpu_count);
+ free_percpu((unsigned __percpu *) pcpu_count);
}
return 1;