#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */
/*
 * These have to be done with inline assembly: that way the
 * bit-setting is guaranteed to be atomic. All bit operations return
 * 0 if the bit was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
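/*
 * Illustration (a sketch, not part of the interface): for
 *
 *	unsigned long map[2];
 *
 * set_bit(0, map) sets the LSB of map[0] and set_bit(64, map) sets
 * the LSB of map[1]; the implementations below index 32-bit
 * longwords via (nr >> 5) and mask with (nr & 31).
 */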
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"		/* load-locked the longword */
	"	bis %0,%2,%0\n"		/* OR in the mask */
	"	stl_c %0,%1\n"		/* store-conditional */
	"	beq %0,2f\n"		/* retry if the store failed */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
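/*
 * These barriers pair with clear_bit when it is used to release a
 * flag, e.g. (a sketch; PENDING and flags are hypothetical):
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(PENDING, &flags);
 *	smp_mb__after_clear_bit();
 */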
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"		/* clear the mask bit */
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}
static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"		/* flip the mask bit */
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"		/* extract the old bit */
	"	bne %2,2f\n"		/* already set: skip the store */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* retry if store-conditional failed */
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"		/* extract the old bit */
	"	beq %2,2f\n"		/* already clear: skip the store */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"		/* remember the old bit */
	"	xor %0,%3,%0\n"		/* always flip it */
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}
static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
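/*
 * Worked example: ffz_b(0x0b).  0x0b is 00001011, whose first zero
 * is bit 2.  ~x & -~x isolates it: x becomes 0x04, so x2 != 0
 * contributes 2 while x4 and x1 contribute nothing, giving 2.
 */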
static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	/* cmpbge flags the bytes that are all-ones; the first zero
	   flag marks the first byte with a zero bit in it.  */
	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
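/*
 * Per the comment above, callers must screen out the all-ones case
 * themselves (a sketch):
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 */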
/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	/* Here cmpbge flags the all-zero bytes, so the first zero
	   flag marks the first nonzero byte.  */
	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
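/*
 * Sample values, matching the libc convention: ffs(0) == 0,
 * ffs(1) == 1, ffs(0x80000000) == 32.
 */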
/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	/* Flag the nonzero bytes, find the highest one, then do a
	   byte-wide fls on it via the table.  */
	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
static inline int fls(int x)
{
	return fls64((unsigned int) x);
}
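/*
 * Sample values: fls(0) == 0, fls(1) == 1, fls64(0xF0UL) == 8,
 * fls64(1UL << 63) == 64.
 */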
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int hweight32(unsigned int w)
{
	return hweight64(w);
}

static inline unsigned int hweight16(unsigned int w)
{
	return hweight64(w & 0xffff);
}

static inline unsigned int hweight8(unsigned int w)
{
	return hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/hweight.h>
#endif
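/*
 * Sample values: hweight64(0) == 0, hweight8(0xE3) == 5 (11100011
 * has five set bits), hweight32(~0U) == 32.
 */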
#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);
	b0 = (b0 ? b0 : b1);

	return __ffs(b0) + ofs;
}
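/*
 * Worked example: if b[0] == 0 and the lowest set bit of b[1] is
 * bit 3, then ofs resolves to 64, b0 to b[1], and the result is
 * __ffs(b[1]) + 64 == 67.
 */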
#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(l,n,a)   test_and_set_bit(n,a)
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */