1 #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
4 #include <linux/types.h>
5 #include <linux/bitops/find.h>
6 #include <linux/bitops/hweight.h>
7 #include <linux/kernel.h>
/* Mask with only bit @nr (modulo word size) set within its word. */
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
/* Index of the unsigned long word that contains bit @nr. */
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE 8
/* Number of unsigned longs needed to store a bitmap of @nr bits. */
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
/**
 * __clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
/**
 * __test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * WARNING: non atomic and it can be reordered!
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(int nr,
					    volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}
107 * test_bit - Determine whether a bit is set
108 * @nr: bit number to test
109 * @addr: Address to start counting from
111 static inline int test_bit(int nr, const volatile unsigned long *addr)
113 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns the index (0-based) of the least significant set bit.
 * Undefined if no bit exists, so code should check against 0 first.
 *
 * NOTE(review): the unconditional 32-bit step assumes unsigned long is
 * 64 bits wide; on a 32-bit long the `>>= 32` would be undefined
 * behavior — confirm target width or guard with BITS_PER_LONG.
 */
static inline unsigned long __ffs(unsigned long word)
{
	int num = 0;

	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}
/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: the bitmap to search
 * @size: total number of bits in the bitmap
 * @offset: bit number at which to start searching
 *
 * Defined out of line. NOTE(review): the @size parameter was missing
 * from the mangled prototype; restored per the canonical signature —
 * confirm against the defining translation unit.
 */
unsigned long find_next_bit(const unsigned long *addr,
			    unsigned long size,
			    unsigned long offset);
/**
 * hweight_long - count the set bits (Hamming weight) of an unsigned long
 * @w: the word to weigh
 *
 * Dispatches to hweight32() or hweight64() based on the compile-time
 * width of unsigned long; the ternary folds to a single call.
 */
static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
160 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */