Merge tag 'v2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5095b834a6fb52f1f746257805f34cdabf186528..27c3c6fcfad321a4915cba6b3ab6162cb10c19a1 100644
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
        pscr_ret__;                                                     \
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)                  \
+({                                                                     \
+       typeof(variable) pscr2_ret__;                                   \
+       __verify_pcpu_ptr(&(variable));                                 \
+       switch(sizeof(variable)) {                                      \
+       case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
+       case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
+       case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
+       case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pscr2_ret__;                                                    \
+})
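
For illustration only, assuming a hypothetical 4-byte per-cpu int variable hyp_counter: the sizeof() switch selects the "case 4" branch, so a call through this macro resolves to the size-specific helper.

    /*
     * __pcpu_size_call_return2(this_cpu_add_return_, hyp_counter, 1)
     *
     * verifies that hyp_counter really is a per-cpu variable, hits
     * "case 4" of the sizeof() switch and evaluates to the value of
     *
     *     this_cpu_add_return_4(hyp_counter, 1)
     */
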
+
 #define __pcpu_size_call(stem, variable, ...)                          \
 do {                                                                   \
        __verify_pcpu_ptr(&(variable));                                 \
@@ -402,6 +417,89 @@ do {                                                                       \
 # define this_cpu_xor(pcp, val)                __pcpu_size_call(this_cpu_xor_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)                         \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       __this_cpu_add(pcp, val);                                       \
+       ret__ = __this_cpu_read(pcp);                                   \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
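
A minimal usage sketch for the returning add/inc/dec operations, assuming a hypothetical per-cpu counter; the generic fallback above disables preemption internally, so the caller needs no extra protection:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, hyp_event_count);         /* hypothetical counter */

    static int note_event(void)
    {
            /* bump this CPU's counter and return the new value */
            return this_cpu_inc_return(hyp_event_count);
    }
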
+
+#define _this_cpu_generic_xchg(pcp, nval)                              \
+({     typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       __this_cpu_write(pcp, nval);                                    \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+#  define this_cpu_xchg_1(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+#  define this_cpu_xchg_2(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+#  define this_cpu_xchg_4(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+#  define this_cpu_xchg_8(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)      \
+       __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
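
A sketch of this_cpu_xchg, again with a hypothetical per-cpu variable; the old value is returned and the new one stored, with the generic fallback handling preemption for the caller:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hyp_pending);   /* hypothetical work bitmask */

    static unsigned long take_pending(void)
    {
            /* fetch and clear this CPU's pending word in one operation */
            return this_cpu_xchg(hyp_pending, 0UL);
    }
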
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)                     \
+({     typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+#  define this_cpu_cmpxchg_1(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+#  define this_cpu_cmpxchg_2(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+#  define this_cpu_cmpxchg_4(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+#  define this_cpu_cmpxchg_8(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)     \
+       __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
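
A sketch of this_cpu_cmpxchg with a hypothetical per-cpu state word; the store happens only if the current value equals oval, and the returned value tells the caller whether it did:

    #include <linux/types.h>
    #include <linux/percpu.h>

    #define HYP_IDLE 0                                    /* hypothetical states */
    #define HYP_BUSY 1

    static DEFINE_PER_CPU(int, hyp_state);                /* hypothetical */

    static bool try_claim_this_cpu(void)
    {
            /* move this CPU from IDLE to BUSY; fails if it was not IDLE */
            return this_cpu_cmpxchg(hyp_state, HYP_IDLE, HYP_BUSY) == HYP_IDLE;
    }
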
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -529,11 +627,87 @@ do {                                                                      \
 # define __this_cpu_xor(pcp, val)      __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define __this_cpu_generic_add_return(pcp, val)                                \
+({                                                                     \
+       __this_cpu_add(pcp, val);                                       \
+       __this_cpu_read(pcp);                                           \
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+#  define __this_cpu_add_return_1(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+#  define __this_cpu_add_return_2(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+#  define __this_cpu_add_return_4(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+#  define __this_cpu_add_return_8(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)       __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
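
The __this_cpu_ variants leave preemption handling to the caller; a sketch of the intended call pattern, with a hypothetical per-cpu reference count:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, hyp_refs);                 /* hypothetical */

    static int grab_ref(void)
    {
            int ref;

            preempt_disable();               /* caller supplies the protection */
            ref = __this_cpu_inc_return(hyp_refs);
            preempt_enable();
            return ref;
    }
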
+
+#define __this_cpu_generic_xchg(pcp, nval)                             \
+({     typeof(pcp) ret__;                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       __this_cpu_write(pcp, nval);                                    \
+       ret__;                                                          \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+#  define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+#  define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+#  define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+#  define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)    \
+       __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)                    \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       ret__;                                                          \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+#  define __this_cpu_cmpxchg_1(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+#  define __this_cpu_cmpxchg_2(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+#  define __this_cpu_cmpxchg_4(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+#  define __this_cpu_cmpxchg_8(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)   \
+       __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
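
Likewise for __this_cpu_cmpxchg: a sketch assuming a hypothetical per-cpu slot, callable only from a context that already has preemption disabled:

    #include <linux/types.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(void *, hyp_slot);              /* hypothetical */

    /* caller must already have preemption disabled */
    static bool install_in_slot(void *obj)
    {
            /* claim the slot only if it is currently empty */
            return __this_cpu_cmpxchg(hyp_slot, NULL, obj) == NULL;
    }
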
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from other
  * processors (which one gets when using regular atomic operations).
  * They are guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)                                \
@@ -620,4 +794,33 @@ do {                                                                       \
 # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)                   \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       unsigned long flags;                                            \
+       local_irq_save(flags);                                          \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       local_irq_restore(flags);                                       \
+       ret__;                                                          \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)          \
+       __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
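
A sketch of irqsafe_cpu_cmpxchg with a hypothetical per-cpu flag that an interrupt handler on the same CPU may also modify; the generic fallback protects the read-compare-write against local interrupts, though not against other CPUs:

    #include <linux/types.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned char, hyp_irq_flag);   /* hypothetical */

    static bool set_flag_once(void)
    {
            /* safe against the local IRQ handler, not against other CPUs */
            return irqsafe_cpu_cmpxchg(hyp_irq_flag, 0, 1) == 0;
    }
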
+
 #endif /* __LINUX_PERCPU_H */