#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

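/*
 * A minimal usage sketch (illustrative only; "nr_files" is a
 * hypothetical counter, not something declared in this header):
 *
 *	static struct percpu_counter nr_files;
 *
 *	if (percpu_counter_init(&nr_files, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&nr_files);		(cheap per-CPU fast path)
 *	total = percpu_counter_sum(&nr_files);	(exact; walks every CPU)
 *	percpu_counter_destroy(&nr_files);
 */
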
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

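/*
 * percpu_counter_batch bounds the per-CPU error: each CPU accumulates
 * deltas in its local s32 and folds them into fbc->count once they
 * reach +/- batch, so percpu_counter_read() can drift from the true
 * value by up to batch * num_online_cpus(). As a worked example
 * (illustrative numbers only): with batch = 32 on a 4-CPU machine, a
 * read may be off by as much as 4 * 32 = 128 in either direction.
 */
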
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

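/*
 * A sketch of how the batched compare works (this describes the usual
 * implementation in lib/percpu_counter.c, not code in this header):
 * the shared count can be wrong by at most batch * num_online_cpus(),
 * so when |fbc->count - rhs| exceeds that bound the cheap read decides
 * the comparison, and only the ambiguous window pays for an exact
 * __percpu_counter_sum().
 */
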
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);

	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

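/*
 * Choosing between the two: percpu_counter_read() below is O(1) but
 * approximate, while percpu_counter_sum() above is exact but must walk
 * every CPU under the lock. A common pattern (hypothetical names):
 *
 *	if (percpu_counter_read_positive(&nr_files) > limit &&
 *	    percpu_counter_sum_positive(&nr_files) > limit)
 *		return -ENFILE;
 *
 * The cheap read filters the common case before paying for the sum.
 */
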
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 0;
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}

#endif /* CONFIG_SMP */

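/*
 * The wrappers below are shared by both configurations; in the UP case
 * they compile down to plain arithmetic on fbc->count.
 */
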
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */