/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
			    volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# set_bit	\n"
			"	" __INS "%0, %3, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# set_bit	\n"
			"	or	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}
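
/*
 * Usage sketch (illustrative only, not part of this header; the flag word
 * and bit number are made-up names):
 *
 *	static unsigned long device_flags;	// bit 0 = "device online"
 *
 *	set_bit(0, &device_flags);		// atomic RMW, no barrier implied
 *
 * Because @nr may exceed BITS_PER_LONG, set_bit(71, bitmap) acts on bit 7
 * of bitmap[1] on a 64-bit kernel.
 */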

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1		# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# clear_bit	\n"
			"	and	%0, %2				\n"
			"	" __SC "%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}
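
/*
 * Barrier pairing sketch (illustrative): if clear_bit() releases a flag
 * that another CPU tests, a barrier is needed, as the comment above says.
 * PENDING and flags are hypothetical names:
 *
 *	smp_mb__before_atomic();	// order prior stores before the clear
 *	clear_bit(PENDING, &flags);
 */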

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}
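
/*
 * Illustrative release-side sketch: clear_bit_unlock() already provides
 * the release barrier, so no extra smp_mb__before_atomic() is needed.
 * MY_LOCK_BIT and my_word are made-up names; the acquire-side
 * counterpart, test_and_set_bit_lock(), appears below:
 *
 *	clear_bit_unlock(MY_LOCK_BIT, &my_word);
 */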

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1		# change_bit	\n"
			"	xor	%0, %2				\n"
			"	" __SC	"%0, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}
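
/*
 * Toggle sketch (illustrative; led_state and LED_BIT are made-up names):
 *
 *	change_bit(LED_BIT, &led_state);	// atomically flips the bit
 *
 * Two concurrent change_bit() calls on the same bit leave it unchanged,
 * which a non-atomic read-modify-write could not guarantee.
 */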

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
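
/*
 * Try-lock style sketch (illustrative; BUSY and state are hypothetical).
 * The implied full barrier means the caller owns the resource when 0 is
 * returned:
 *
 *	if (test_and_set_bit(BUSY, &state))
 *		return -EBUSY;			// somebody else got there first
 *	// ... exclusive work ...
 *	clear_bit_unlock(BUSY, &state);
 */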

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res;
}
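
/*
 * Acquire/release pairing sketch (illustrative; bit 0 of "guard" is a
 * made-up lock bit):
 *
 *	while (test_and_set_bit_lock(0, &guard))
 *		cpu_relax();			// spin until we take the bit
 *	// ... critical section ...
 *	clear_bit_unlock(0, &guard);
 */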

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS	"%0, $0, %3, 1			\n"
			"	" __SC	"%0, %1				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
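
/*
 * Consume-a-pending-event sketch (illustrative; EV_RX, pending and
 * handle_rx() are made-up names).  Exactly one caller observes the
 * transition from 1 to 0:
 *
 *	if (test_and_clear_bit(EV_RX, &pending))
 *		handle_rx();
 */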

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	pop					\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		loongson_llsc_mb();
		do {
			__asm__ __volatile__(
			"	.set	push				\n"
			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
			"	" __LL	"%0, %1 # test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	pop				\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res;
}
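
/*
 * Illustrative sketch: atomically flip a made-up "parity" bit and learn
 * the previous phase in one step (PARITY and phase are hypothetical):
 *
 *	old = test_and_change_bit(PARITY, &phase);
 */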

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}
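
/*
 * Illustrative note: the non-atomic __clear_bit() is safe here only when
 * the lock bit's word cannot be modified concurrently, e.g. a word whose
 * other bits are owned by the lock holder ("owned_word" is a made-up name):
 *
 *	// valid only if no other CPU touches other bits of owned_word
 *	__clear_bit_unlock(0, &owned_word);
 */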

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word.
 * The result is undefined if no 1 bit exists, so code should check
 * against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}
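
/*
 * Worked example: for word = 0x90 (binary 1001 0000) the most significant
 * 1 bit is bit 7, so __fls(0x90) == 7 on both the clz/dclz path and the
 * shift-and-test fallback above.
 */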

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
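
/*
 * Worked example: word & -word isolates the lowest set bit in two's
 * complement, so for word = 0x90, word & -word == 0x10 and
 * __ffs(0x90) == __fls(0x10) == 4.
 */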

/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
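
/*
 * Worked example: fls() returns 1-based positions, so fls(0x90) == 8,
 * while the 0-based __fls(0x90) == 7; fls(0) is the defined special case
 * returning 0.
 */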

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
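
/*
 * Worked example: like the libc routine, ffs() is 1-based, so
 * ffs(0x90) == 5 and ffs(0) == 0, whereas __ffs(0x90) == 4.
 */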

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */