From: Thomas Gleixner
Date: Sat, 25 Jul 2009 14:21:48 +0000 (+0200)
Subject: locking, percpu_counter: Annotate ::lock as raw
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=f032a450812f6c7edd532772cc7c48091bca9f27;p=linux-beck.git

locking, percpu_counter: Annotate ::lock as raw

The percpu_counter::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 5edc9014263a..b9df9ed1adc0 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
 #ifdef CONFIG_SMP
 
 struct percpu_counter {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	s64 count;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 28f2c33c6b53..f087105ed914 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -59,13 +59,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -76,10 +76,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		spin_lock(&fbc->lock);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_write(*fbc->counters, 0);
-		spin_unlock(&fbc->lock);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
@@ -96,13 +96,13 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	s64 ret;
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
-	spin_lock_init(&fbc->lock);
+	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		s32 *pcount;
 		unsigned long flags;
 
-		spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
 #endif
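
For context, a minimal sketch of the raw spinlock API this patch switches
to; the lock and function names below are hypothetical and not taken from
the patch:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/*
	 * A raw_spinlock_t stays a true spinning lock even on PREEMPT_RT,
	 * where plain spinlock_t is substituted by a sleeping lock. That
	 * makes it safe to take in atomic context (hard IRQ handlers,
	 * preemption disabled), at the cost of a non-preemptible section.
	 */
	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */
	static s64 example_count;

	/* Callable from any context: the raw lock never sleeps. */
	static void example_add(s64 delta)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		example_count += delta;
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}

In mainline, spin_lock() is effectively a wrapper around the raw variant,
so the annotation is purely documentary, as the changelog notes; the
distinction only takes effect once -rt turns spinlock_t into a
preemptible lock.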