/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30 \
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
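
/*
 * Illustrative sketch (not part of this header): with the numbering
 * above the generic C idiom and the arch bitop hit the same storage
 * bit, so the two styles may be mixed freely.
 */
#if 0	/* example only, assumes set_bit() as defined below */
static void __bit_numbering_example(void)
{
	unsigned long flags = 0;

	flags |= 1UL << 5;	/* generic idiom: flags == 0x20 */
	set_bit(5, &flags);	/* arch bitop: sets the very same bit */
}
#endif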
/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];
#ifndef CONFIG_64BIT

#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");					\
	__old;							\
})
#else /* CONFIG_64BIT */

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __BITOPS_OR		"laog"
#define __BITOPS_AND		"lang"
#define __BITOPS_XOR		"laxg"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old;					\
								\
	asm volatile(						\
		__op_string "	%0,%2,%1\n"			\
		: "=d" (__old), "+Q" (*(unsigned long *)__addr)	\
		: "d" (__val)					\
		: "cc");					\
	__old;							\
})
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__addr, __val, __op_string)		\
({								\
	unsigned long __old, __new;				\
								\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");					\
	__old;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#endif /* CONFIG_64BIT */
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
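
/*
 * Worked example (illustrative): __BITOPS_WORDS rounds up to whole
 * longs, e.g. __BITOPS_WORDS(140) is (140 + 63) / 64 = 3 on 64 bit.
 */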
#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(addr, mask, __BITOPS_OR);
}
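
/*
 * Illustrative note: (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3 is the
 * byte offset of the long word containing bit nr, e.g. nr = 70 on
 * 64 bit gives (70 & ~63) / 8 = 8, i.e. the second long.
 */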

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	/* Do the atomic update. */
	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
	barrier();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
	/* Do the atomic update. */
	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
	barrier();
	return (old & ~mask) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (BITS_PER_LONG - 1));
	/* Do the atomic update. */
	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
	barrier();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
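
/*
 * Illustrative note: (nr ^ (BITS_PER_LONG - 8)) >> 3 converts the
 * LSB-first bit number into a big endian byte offset, e.g. nr = 0
 * on 64 bit addresses byte 7 of the long and uses mask 0x01.
 */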

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)	test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif

/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc");
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}
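
/*
 * Worked example (illustrative): __ffz_word(0, 0xff) skips the all
 * ones low byte (nr += 8, word >>= 8), then _zb_findmap[0] is 0,
 * so the first zero bit is bit 8.
 */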

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
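
/*
 * Worked example (illustrative): __ffs_word(0, 0x1000) skips the
 * zero low byte (nr += 8, word >>= 8), then _sb_findmap[0x10] is 4,
 * so the first set bit is bit 12.
 */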

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
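
/*
 * Worked examples (illustrative): ffs(0) == 0, ffs(1) == 1,
 * ffs(0x20) == 6 -- bit numbers are 1-based as in the libc routine.
 */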

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/*
 * Big endian variant which starts bit counting from left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}

/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * direction'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));	\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
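
/*
 * Illustrative sketch (not part of this header): iterate over the
 * bits the hardware set in a left-numbered bitmap; the names are
 * made up for the example.
 */
#if 0	/* example only */
static void __left_bitmap_example(const unsigned long *map)
{
	int bit;

	for_each_set_bit_left(bit, map, 2 * BITS_PER_LONG)
		pr_debug("bit %d set\n", bit);
}
#endif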

/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffz_word returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * __ffs_word returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
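
/*
 * Illustrative note: in this convention bit 0 of an ext2/minix
 * bitmap is the 0x01 bit of the first byte in storage, which is why
 * the *_le helpers below go through __load_ulong_le.
 */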

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffz returns BITS_PER_LONG
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (BITS_PER_LONG - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / BITS_PER_LONG;
	if (bit) {
		/*
		 * s390 version of ffs returns BITS_PER_LONG
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < BITS_PER_LONG)
			return set + offset;
		offset += BITS_PER_LONG;
		size -= BITS_PER_LONG;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */