/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * With SSE2 (X86_FEATURE_XMM2) use the dedicated fence instructions;
 * older CPUs fall back to a LOCK-prefixed op, which is a full barrier.
 */
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
                                      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
                                       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()    asm volatile("mfence" ::: "memory")
#define rmb()   asm volatile("lfence" ::: "memory")
#define wmb()   asm volatile("sfence" ::: "memory")
#endif
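
/*
 * Illustrative sketch, not part of this header: these mandatory
 * barriers order CPU accesses against devices, e.g. making DMA data
 * visible before ringing a doorbell. The device, ring and register
 * names below are hypothetical.
 *
 *	dev->ring[tail].addr = dma_addr;	// fill the descriptor
 *	dev->ring[tail].len  = len;
 *	wmb();					// data before doorbell
 *	writel(tail, dev->mmio + DOORBELL);	// device may fetch now
 */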

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *      bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
                unsigned long size)
{
        unsigned long mask;

        /*
         * CMP sets the carry flag iff index < size; SBB of a register
         * from itself then yields ~0UL (carry set) or 0, without a
         * conditional branch the CPU could mispredict.
         */
        asm ("cmp %1,%2; sbb %0,%0;"
                        : "=r" (mask)
                        : "g" (size), "r" (index)
                        : "cc");
        return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
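
/*
 * Illustrative sketch, not part of this header: sanitizing an
 * untrusted index before an array access, in the spirit of
 * array_index_nospec() in linux/nospec.h. ARRAY_LEN and table are
 * hypothetical.
 *
 *	if (idx >= ARRAY_LEN)
 *		return -EINVAL;
 *	idx &= array_index_mask_nospec(idx, ARRAY_LEN);
 *	val = table[idx];
 *
 * Even if the bounds check above is mispredicted, the mask forces idx
 * to 0, so speculation cannot load from an attacker-chosen address.
 */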

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
                                           "lfence", X86_FEATURE_LFENCE_RDTSC)
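
/*
 * Illustrative sketch, not part of this header: a speculation barrier
 * placed after validating untrusted input, so dependent loads cannot
 * execute under a mispredicted check. Names below are hypothetical.
 *
 *	if (cmd >= NR_COMMANDS)
 *		return -EINVAL;
 *	barrier_nospec();		// no speculation past this point
 *	handler = command_table[cmd];
 */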

/*
 * x86 keeps loads ordered with loads and stores with stores, so plain
 * compiler barriers are enough for coherent DMA memory; only the
 * PPRO_FENCE case needs a real fence.
 */
#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()       rmb()
#else
#define dma_rmb()       barrier()
#endif
#define dma_wmb()       barrier()
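
/*
 * Illustrative sketch, not part of this header: dma_rmb() ordering a
 * descriptor's ownership flag against its payload in coherent DMA
 * memory. The descriptor layout below is hypothetical.
 *
 *	if (desc->status & OWN_BIT) {	// device has written it back
 *		dma_rmb();		// read status before payload
 *		len = desc->len;
 *	}
 */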

/*
 * A LOCK-prefixed instruction is a full barrier; LOCK ADD to the word
 * below the stack pointer is cheaper than MFENCE.
 */
#ifdef CONFIG_X86_32
#define __smp_mb()      asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()      asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()     dma_rmb()
#define __smp_wmb()     barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
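
/*
 * Illustrative sketch, not part of this header: __smp_store_mb() is a
 * store plus a full barrier (XCHG implies LOCK, and LOCK-prefixed ops
 * are full fences on x86). The classic use is "store my flag, then
 * read the other side's flag", where the store-load pair must not be
 * reordered. The names below are hypothetical.
 *
 *	__smp_store_mb(self->sleeping, 1);	// store + full fence
 *	if (!READ_ONCE(peer->pending))		// cannot pass the store
 *		schedule();
 */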

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * With this option x86 is not assumed to have its usual strong TSO
 * memory model, so we fall back to full barriers.
 */

#define __smp_store_release(p, v)                                       \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define __smp_load_acquire(p)                                           \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        ___p1;                                                          \
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)                                       \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define __smp_load_acquire(p)                                           \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ___p1;                                                          \
})

#endif
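
/*
 * Illustrative sketch, not part of this header: the producer/consumer
 * handoff these macros exist for, via the generic smp_store_release()
 * and smp_load_acquire() wrappers. msg and ready are hypothetical.
 *
 *	// producer
 *	WRITE_ONCE(msg, compute());
 *	smp_store_release(&ready, 1);	// msg visible before ready
 *
 *	// consumer
 *	if (smp_load_acquire(&ready))	// ready read before msg
 *		val = READ_ONCE(msg);
 *
 * On TSO a compiler barrier is all that is needed; the hardware never
 * reorders the store-store or load-load pairs involved.
 */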

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()       barrier()
#define __smp_mb__after_atomic()        barrier()

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */