-#ifndef _ARCH_LOCAL_H
-#define _ARCH_LOCAL_H
+#ifndef ASM_X86__LOCAL_H
+#define ASM_X86__LOCAL_H
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/asm.h>
-typedef struct
-{
+typedef struct {
atomic_long_t a;
} local_t;
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
-#define local_set(l,i) atomic_long_set(&(l)->a, (i))
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
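+/*
+ * Illustrative usage sketch (variable names are examples only):
+ *
+ *	static local_t hits = LOCAL_INIT(0);
+ *
+ *	local_inc(&hits);
+ *	printk(KERN_DEBUG "hits=%ld\n", local_read(&hits));
+ *
+ * local_t updates compile to single read-modify-write instructions, so
+ * they are atomic with respect to interrupts and preemption on the CPU
+ * that owns the variable, but, unlike atomic_t, not across CPUs.
+ */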
static inline void local_inc(local_t *l)
{
- __asm__ __volatile__(
- _ASM_INC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_INC "%0"
+ : "+m" (l->a.counter));
}
static inline void local_dec(local_t *l)
{
- __asm__ __volatile__(
- _ASM_DEC "%0"
- :"+m" (l->a.counter));
+ asm volatile(_ASM_DEC "%0"
+ : "+m" (l->a.counter));
}
static inline void local_add(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_ADD "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_ADD "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}
static inline void local_sub(long i, local_t *l)
{
- __asm__ __volatile__(
- _ASM_SUB "%1,%0"
- :"+m" (l->a.counter)
- :"ir" (i));
+ asm volatile(_ASM_SUB "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
}
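+/*
+ * Note the missing LOCK prefix on the instructions above: local_t only
+ * guarantees atomicity against interrupts and preemption on the owning
+ * CPU, so the cross-CPU bus locking that atomic_t needs on SMP is not
+ * required here.
+ */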
/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_SUB "%2,%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_SUB "%2,%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
static inline int local_dec_and_test(local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_DEC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_DEC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
static inline int local_inc_and_test(local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_INC "%0; sete %1"
- :"+m" (l->a.counter), "=qm" (c)
- : : "memory");
+ asm volatile(_ASM_INC "%0; sete %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
return c != 0;
}
static inline int local_add_negative(long i, local_t *l)
{
unsigned char c;
- __asm__ __volatile__(
- _ASM_ADD "%2,%0; sets %1"
- :"+m" (l->a.counter), "=qm" (c)
- :"ir" (i) : "memory");
+ asm volatile(_ASM_ADD "%2,%0; sets %1"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
return c;
}
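+/*
+ * local_add_return() uses XADD: the instruction adds the register to the
+ * memory operand and returns the old memory value in the register, so
+ * adding the saved addend (__i) back afterwards yields the post-add
+ * value.  Plain 386 CPUs lack XADD, hence the CONFIG_M386 fallback
+ * below that masks interrupts instead.
+ */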
static inline long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
unsigned long flags;
- if(unlikely(boot_cpu_data.x86 <= 3))
+ if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
- __asm__ __volatile__(
- _ASM_XADD "%0, %1;"
- :"+r" (i), "+m" (l->a.counter)
- : : "memory");
+ asm volatile(_ASM_XADD "%0, %1;"
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}
static inline long local_sub_return(long i, local_t *l)
{
- return local_add_return(-i,l);
+ return local_add_return(-i, l);
}
-#define local_inc_return(l) (local_add_return(1,l))
-#define local_dec_return(l) (local_sub_return(1,l))
+#define local_inc_return(l) (local_add_return(1, l))
+#define local_dec_return(l) (local_sub_return(1, l))
#define local_cmpxchg(l, o, n) \
(cmpxchg_local(&((l)->a.counter), (o), (n)))
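+/*
+ * cmpxchg_local() is the CMPXCHG-based compare-and-swap without the LOCK
+ * prefix, which is sufficient for CPU-local data.  local_add_unless()
+ * below loops on it: retry when another local update (e.g. from an
+ * interrupt handler) slipped in, and stop once the value was updated or
+ * the forbidden value @u was observed.
+ */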
#define local_add_unless(l, a, u) \
({ \
long c, old; \
- c = local_read(l); \
+ c = local_read((l)); \
for (;;) { \
if (unlikely(c == (u))) \
break; \
- old = local_cmpxchg((l), c, c + (a)); \
+ old = local_cmpxchg((l), c, c + (a)); \
if (likely(old == c)) \
break; \
c = old; \
} \
c != (u); \
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they dont take the spinlock that is needed for other atomic_t
 * operations.
 */
#define __local_inc(l) local_inc(l)
#define __local_dec(l) local_dec(l)
-#define __local_add(i,l) local_add((i),(l))
-#define __local_sub(i,l) local_sub((i),(l))
+#define __local_add(i, l) local_add((i), (l))
+#define __local_sub(i, l) local_sub((i), (l))
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */
/* Need to disable preemption for the cpu local counters otherwise we could
still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
+#define cpu_local_wrap_v(l) \
+({ \
+ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; \
+})
#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
+({ \
+ preempt_disable(); \
+ (l); \
+ preempt_enable(); \
+}) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
+
+#define __cpu_local_inc(l) cpu_local_inc((l))
+#define __cpu_local_dec(l) cpu_local_dec((l))
#define __cpu_local_add(i, l) cpu_local_add((i), (l))
#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
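+/*
+ * Illustrative per-cpu usage (names are examples only); note that the
+ * counter itself is passed, not its address:
+ *
+ *	static DEFINE_PER_CPU(local_t, pkt_count);
+ *
+ *	cpu_local_inc(pkt_count);
+ *
+ * The cpu_local_* wrappers disable preemption around the access so the
+ * update cannot be charged to a previous CPU's counter after a migration.
+ */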
-#endif /* _ARCH_LOCAL_H */
+#endif /* ASM_X86__LOCAL_H */