#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)
26 extern struct static_key paravirt_ticketlocks_enabled;
27 static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define vcpu_is_preempted vcpu_is_preempted
/*
 * Ask the paravirt backend whether the vCPU currently bound to @cpu has
 * been preempted by the hypervisor, so lock spinners can avoid busy-waiting
 * on a lock holder that is not actually running.  Delegates to
 * pv_lock_ops.vcpu_is_preempted().
 */
static inline bool vcpu_is_preempted(int cpu)
{
	return pv_lock_ops.vcpu_is_preempted(cpu);
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
#include <asm/qspinlock.h>
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */
#include <asm/qrwlock.h>
/*
 * The IRQ-flags-aware lock variants gain nothing special on x86: the
 * flags argument is simply ignored and the plain lock is taken.
 */
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

/*
 * Ease off the CPU while spinning; cpu_relax() likely maps to PAUSE on
 * x86 -- see <asm/processor.h> for the actual definition.
 */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
#endif /* _ASM_X86_SPINLOCK_H */