/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
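
/*
 * Example (illustrative only, not part of this header): a typical user of
 * the final spin_*() API built below. The lock, counter and function names
 * are hypothetical.
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long nr_events;
 *
 *	void record_event(void)
 *	{
 *		spin_lock(&stats_lock);
 *		nr_events++;
 *		spin_unlock(&stats_lock);
 *	}
 */
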
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
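
/*
 * Example (illustrative sketch, hypothetical names): the lost-wakeup pattern
 * that property (1) above rules out. "need_wakeup" and "event" are assumed
 * flags, and "wait_lock" an assumed spinlock; none of them exist in the
 * kernel under these names.
 *
 *	CPU0 (about to sleep)		CPU1 (waker)
 *
 *	WRITE_ONCE(need_wakeup, 1);	WRITE_ONCE(event, 1);
 *	spin_lock(&wait_lock);		smp_mb();
 *	smp_mb__after_spinlock();	r1 = READ_ONCE(need_wakeup);
 *	r0 = READ_ONCE(event);
 *	spin_unlock(&wait_lock);
 *
 * The guarantee is that r0 == 0 and r1 == 0 cannot both happen: either CPU0
 * sees the event or CPU1 sees that a wakeup is needed, so neither side can
 * miss the other.
 */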

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
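
/*
 * Example (illustrative only): the _nested variants are about lockdep
 * classes, not recursion. When two locks of the same class must legitimately
 * be held at once (the hypothetical parent/child objects below), the inner
 * acquisition carries a subclass so lockdep does not report a false deadlock:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	child->parent = parent;
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */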

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
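
/*
 * Example (illustrative only, hypothetical struct): a lock embedded in a
 * dynamically allocated object is initialized with spin_lock_init();
 * statically allocated locks use DEFINE_SPINLOCK() instead.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int refcnt;
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		f->refcnt = 1;
 *	}
 */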

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
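
/*
 * Example (illustrative only, hypothetical helper): spin_is_locked() and
 * assert_spin_locked() are sanity checks, not synchronization primitives.
 * A function that must only be called with a lock already held might check:
 *
 *	static void foo_update_locked(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		f->refcnt++;
 *	}
 *
 * Basing locking decisions on the return value of spin_is_locked() is racy:
 * the lock state may change the moment after it has been sampled.
 */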

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
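
/*
 * Example (illustrative only, hypothetical object and list lock): the classic
 * use of atomic_dec_and_lock() is dropping a reference where the final put
 * must unlink the object from a locked list before freeing it:
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_lock(&f->refcnt, &foo_list_lock)) {
 *			list_del(&f->list);
 *			spin_unlock(&foo_list_lock);
 *			kfree(f);
 *		}
 *	}
 *
 * Only the thread that takes the count to zero acquires the lock, so the
 * common (non-final) put stays lock-free.
 */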

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */