         * done for const @nr, but no code is generated due to gcc \
         * const prop. \
         */ \
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
+       nr &= 0x1f; \
\
        __asm__ __volatile__( \
        "1:     llock   %0, [%1]        \n" \
\
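The change above is explained by the comment: a C shift count of 32 or
more is undefined behaviour on this 32-bit ISA, so gcc may const-fold an
unmasked (1UL << 33) to 0 at compile time, while the ARC shifter would
use only the low 5 bits at run time. A minimal userspace sketch, not
part of the patch (bitmask() is an illustrative name), of how the
unconditional mask keeps compiler and hardware in agreement:

#include <stdio.h>

/* Masking the shift count keeps the C expression well defined for any
 * nr, matching the hardware's use of only the low 5 bits. */
static unsigned long bitmask(unsigned long nr)
{
        return 1UL << (nr & 0x1f);
}

int main(void)
{
        /* 33 & 0x1f == 1, so this prints 0x2; an unmasked 1UL << 33
         * could legitimately be folded to 0 on a 32-bit target */
        printf("%#lx\n", bitmask(33));
        return 0;
}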
        m += nr >> 5; \
\
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
+       nr &= 0x1f; \
\
        /* \
         * Explicit full memory barrier needed before/after as \
         * LLOCK/SCOND themselves don't provide any such semantics \
         */ \
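The comment reflects that LLOCK/SCOND give atomicity but no ordering,
which is why the test-and-op variants bracket the retry loop with
smp_mb(). A rough userspace analogue, offered as an assumption for
illustration only (sketch_test_and_set_bit() is a made-up name, and
32-bit words are assumed as in the ARC code): a sequentially consistent
C11 fetch-or provides the atomic RMW plus full ordering on both sides.

#include <stdatomic.h>
#include <stdbool.h>

static bool sketch_test_and_set_bit(unsigned long nr,
                                    _Atomic unsigned long *m)
{
        unsigned long mask = 1UL << (nr & 0x1f);

        m += nr >> 5;
        /* seq_cst fetch-or stands in for smp_mb(); llock/scond loop;
         * smp_mb(), and returns the old word so the prior bit state
         * can be reported */
        return (atomic_fetch_or(m, mask) & mask) != 0;
}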
        unsigned long temp, flags; \
        m += nr >> 5; \
\
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
- \
        /* \
         * spin lock/unlock provide the needed smp_mb() before/after \
         */ \
        bitops_lock(flags); \
\
        temp = *m; \
-       *m = temp c_op (1UL << nr); \
+       *m = temp c_op (1UL << (nr & 0x1f)); \
\
        bitops_unlock(flags); \
}
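Without LLSC, atomicity comes from bitops_lock()/bitops_unlock(), which
this hunk does not expand, and as the comment notes the lock and unlock
double as the full barriers. A userspace sketch of the same shape under
that assumption, with a pthread mutex standing in for the kernel lock
and | standing in for one instantiation of c_op:

#include <pthread.h>

static pthread_mutex_t bitops_mutex = PTHREAD_MUTEX_INITIALIZER;

/* c_op instantiates to |, & ~ and ^ across the macro's users; the |
 * (set_bit) case is shown */
static void sketch_set_bit(unsigned long nr, volatile unsigned long *m)
{
        m += nr >> 5;

        pthread_mutex_lock(&bitops_mutex);
        *m |= 1UL << (nr & 0x1f);
        pthread_mutex_unlock(&bitops_mutex);
}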
        unsigned long old, flags; \
        m += nr >> 5; \
\
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
- \
        bitops_lock(flags); \
\
        old = *m; \
-       *m = old c_op (1 << nr); \
+       *m = old c_op (1UL << (nr & 0x1f)); \
\
        bitops_unlock(flags); \
\
-       return (old & (1 << nr)) != 0; \
+       return (old & (1UL << (nr & 0x1f))) != 0; \
}
#endif /* CONFIG_ARC_HAS_LLSC */
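Whichever implementation CONFIG_ARC_HAS_LLSC selects, callers depend on
the returned prior state of the bit. A hypothetical usage sketch (all
names invented, C11 atomics used in place of the kernel primitives) of
the claim-a-slot-exactly-once pattern this return value enables:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long busy_map[4];       /* 4 words of slots */

static bool claim_slot(unsigned int slot)
{
        _Atomic unsigned long *m = busy_map + (slot >> 5);
        unsigned long mask = 1UL << (slot & 0x1f);

        /* a prior bit of 0 means this caller won the race */
        return !(atomic_fetch_or(m, mask) & mask);
}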
        unsigned long temp; \
        m += nr >> 5; \
\
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
- \
        temp = *m; \
-       *m = temp c_op (1UL << nr); \
+       *m = temp c_op (1UL << (nr & 0x1f)); \
}
#define __TEST_N_BIT_OP(op, c_op, asm_op) \
        unsigned long old; \
        m += nr >> 5; \
\
-       if (__builtin_constant_p(nr)) \
-               nr &= 0x1f; \
- \
        old = *m; \
-       *m = old c_op (1 << nr); \
+       *m = old c_op (1UL << (nr & 0x1f)); \
\
-       return (old & (1 << nr)) != 0; \
+       return (old & (1UL << (nr & 0x1f))) != 0; \
}
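The two double-underscore variants above take no lock and emit no
barrier, so they are valid only when the caller already serializes
access to the bitmap; the shift-count mask is still applied because the
undefined-shift hazard is independent of atomicity. A sketch of that
non-atomic pattern (illustrative name, caller-side serialization
assumed):

#include <stdbool.h>

/* Plain read-modify-write: only safe when the bitmap is private to
 * the caller or guarded by an outer lock */
static bool sketch___test_and_set_bit(unsigned long nr, unsigned long *m)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long old;

        m += nr >> 5;
        old = *m;
        *m = old | mask;
        return (old & mask) != 0;
}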
#define BIT_OPS(op, c_op, asm_op) \
        addr += nr >> 5;
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       mask = 1 << nr;
+       mask = 1UL << (nr & 0x1f);
        return ((mask & *addr) != 0);
}
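Every variant shares the same index arithmetic: nr >> 5 selects the
32-bit word and nr & 0x1f the bit within it, so nr = 70 lands on bit 6
of the third word. A self-checking sketch that mirrors the test_bit()
body above (sketch_test_bit() is an illustrative name):

#include <assert.h>

static int sketch_test_bit(unsigned int nr, const unsigned long *addr)
{
        unsigned long mask;

        addr += nr >> 5;
        mask = 1UL << (nr & 0x1f);
        return (mask & *addr) != 0;
}

int main(void)
{
        unsigned long map[4] = { 0 };

        map[2] = 1UL << 6;                      /* set bit 70 by hand */
        assert(sketch_test_bit(70, map));       /* word 2, bit 6 */
        assert(!sketch_test_bit(71, map));
        return 0;
}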