arch/mips/include/asm/atomic.h (Linux 5.6-rc7)
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

#define ATOMIC_OPS(pfx, type)                                           \
static __always_inline type pfx##_read(const pfx##_t *v)                \
{                                                                       \
        return READ_ONCE(v->counter);                                   \
}                                                                       \
                                                                        \
static __always_inline void pfx##_set(pfx##_t *v, type i)               \
{                                                                       \
        WRITE_ONCE(v->counter, i);                                      \
}                                                                       \
                                                                        \
static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)   \
{                                                                       \
        return cmpxchg(&v->counter, o, n);                              \
}                                                                       \
                                                                        \
static __always_inline type pfx##_xchg(pfx##_t *v, type n)              \
{                                                                       \
        return xchg(&v->counter, n);                                    \
}

#define ATOMIC_INIT(i)          { (i) }
ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)       { (i) }
ATOMIC_OPS(atomic64, s64)
#endif

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
static __inline__ void pfx##_##op(type i, pfx##_t * v)                  \
{                                                                       \
        type temp;                                                      \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return;                                                 \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %0, %1          # " #pfx "_" #op "      \n"     \
        "       " #asm_op " %0, %2                              \n"     \
        "       " #sc " %0, %1                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)            \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
}

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result c_op i;                                          \
                v->counter = result;                                    \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_" #op "_return\n"    \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)            \
static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)  \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                v->counter c_op i;                                      \
                raw_local_irq_restore(flags);                           \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # " #pfx "_fetch_" #op "\n"     \
        "       " #asm_op " %0, %1, %3                          \n"     \
        "       " #sc " %0, %2                                  \n"     \
        "\t" __SC_BEQZ "%0, 1b                                  \n"     \
        "       .set    pop                                     \n"     \
        "       move    %0, %1                                  \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i) : __LLSC_CLOBBER);                                   \
                                                                        \
        return result;                                                  \
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)           \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define atomic64_add_return_relaxed    atomic64_add_return_relaxed
# define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
# define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
# define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)                 \
        ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)                  \
        ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
# define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
# define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i, even when the
 * subtraction is not performed (in which case @v is left unchanged).
 */
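/*
 * For illustration, one plausible use (the "free_slots" counter and the
 * try_take_slot() helper below are hypothetical, shown only as a sketch):
 * handing out a bounded number of resource slots without a lock.
 *
 *      static atomic_t free_slots = ATOMIC_INIT(8);
 *
 *      static bool try_take_slot(void)
 *      {
 *              return atomic_sub_if_positive(1, &free_slots) >= 0;
 *      }
 *
 * The subtraction only takes effect when the result stays non-negative,
 * so the counter never drops below zero; a failed attempt leaves it
 * untouched and returns a negative value.
 */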
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)                            \
static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v)       \
{                                                                       \
        type temp, result;                                              \
                                                                        \
        smp_mb__before_atomic();                                        \
                                                                        \
        if (!kernel_uses_llsc) {                                        \
                unsigned long flags;                                    \
                                                                        \
                raw_local_irq_save(flags);                              \
                result = v->counter;                                    \
                result -= i;                                            \
                if (result >= 0)                                        \
                        v->counter = result;                            \
                raw_local_irq_restore(flags);                           \
                smp_mb__after_atomic();                                 \
                return result;                                          \
        }                                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " __SYNC(full, loongson3_war) "                 \n"     \
        "1:     " #ll " %1, %2          # atomic_sub_if_positive\n"     \
        "       .set    pop                                     \n"     \
        "       " #op " %0, %1, %3                              \n"     \
        "       move    %1, %0                                  \n"     \
        "       bltz    %0, 2f                                  \n"     \
        "       .set    push                                    \n"     \
        "       .set    " MIPS_ISA_LEVEL "                      \n"     \
        "       " #sc " %1, %2                                  \n"     \
        "       " __SC_BEQZ "%1, 1b                             \n"     \
        "2:     " __SYNC(full, loongson3_war) "                 \n"     \
        "       .set    pop                                     \n"     \
        : "=&r" (result), "=&r" (temp),                                 \
          "+" GCC_OFF_SMALL_ASM() (v->counter)                          \
        : "Ir" (i)                                                      \
        : __LLSC_CLOBBER);                                              \
                                                                        \
        /*                                                              \
         * In the Loongson3 workaround case we already have a           \
         * completion barrier at 2: above, which is needed due to the   \
         * bltz that can branch to code outside of the LL/SC loop. As   \
         * such, we don't need to emit another barrier here.            \
         */                                                             \
        if (!__SYNC_loongson3_war)                                      \
                smp_mb__after_atomic();                                 \
                                                                        \
        return result;                                                  \
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v)       atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v)     atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */