/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op)                                           \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))                    \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_##op);
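/*
 * For illustration only: ATOMIC_OP(add, add) generates, roughly (modulo
 * the __LL_SC_* decoration), an arch_atomic_add() whose body is the
 * canonical LL/SC retry loop:
 *
 *      prfm    pstl1strm, [v]          // prefetch the line for store
 * 1:   ldxr    w0, [v]                 // load-exclusive v->counter
 *      add     w0, w0, w_i             // apply the operation
 *      stxr    w1, w0, [v]             // store-exclusive; w1 = status
 *      cbnz    w1, 1b                  // retry if exclusivity was lost
 *
 * The register and label names above are purely illustrative.
 */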

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)            \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))     \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %w0, %2\n"                                      \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       st" #rel "xr    %w1, %w0, %2\n"                                 \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)             \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))        \
{                                                                       \
        unsigned long tmp;                                              \
        int val, result;                                                \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %w0, %3\n"                                      \
"       " #asm_op "     %w1, %w0, %w4\n"                                \
"       st" #rel "xr    %w2, %w1, %3\n"                                 \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);

#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
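/*
 * The (name, mb, acq, rel, cl) columns above select the ordering variant:
 * "name" is the suffix appended to the generated function, "mb" is any
 * trailing barrier (dmb ish for the fully-ordered form), "acq"/"rel" turn
 * the exclusives into load-acquire (ldaxr) / store-release (stlxr), and
 * "cl" is the clobber list ("memory" whenever any ordering is implied).
 */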

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

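/*
 * The bitwise operations only have void and fetch_* forms in the generic
 * atomic API, so the value-returning (_return) variants are not generated
 * for them below.
 */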
#undef ATOMIC_OPS
#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)                                         \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))               \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)          \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %0, %2\n"                                       \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       st" #rel "xr    %w1, %0, %2\n"                                  \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)           \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))   \
{                                                                       \
        long result, val;                                               \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %0, %3\n"                                       \
"       " #asm_op "     %1, %0, %4\n"                                   \
"       st" #rel "xr    %w2, %1, %3\n"                                  \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"       prfm    pstl1strm, %2\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.lt    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
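/*
 * Semantics, for reference: the counter is decremented only if the result
 * would remain non-negative; the result of the speculative subtraction is
 * returned either way, so a negative return value means the counter was
 * left untouched. Callers typically test it, e.g. (illustrative only):
 *
 *      if (arch_atomic64_dec_if_positive(&v) < 0)
 *              // v was already <= 0 and was not modified
 */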

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)              \
__LL_SC_INLINE u##sz                                                    \
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,            \
                                         unsigned long old,             \
                                         u##sz new))                    \
{                                                                       \
        unsigned long tmp;                                              \
        u##sz oldval;                                                   \
                                                                        \
        /*                                                              \
         * Sub-word sizes require explicit casting so that the compare  \
         * part of the cmpxchg doesn't end up interpreting non-zero     \
         * upper bits of the register containing "old".                 \
         */                                                             \
        if (sz < 32)                                                    \
                old = (u##sz)old;                                       \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"          \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"    \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(u##sz *)ptr)                                      \
        : [old] "Kr" (old), [new] "r" (new)                             \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
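/*
 * In the instantiations below, "w" selects the register width used for the
 * asm operands (%w... for 32-bit and narrower, the full register for
 * 64-bit), "sfx" is the size suffix on the exclusive accesses (b/h for
 * 8/16-bit), and "sz" picks the u8/u16/u32/u64 type of the generated
 * __cmpxchg_case_##name##sz helper.
 */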

__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory")
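/*
 * The four blocks above generate relaxed, acquire ("acq_"), release
 * ("rel_") and fully-ordered ("mb_") helpers for each size, e.g.
 * __cmpxchg_case_32 and __cmpxchg_case_mb_64; as with the atomics, the
 * fully-ordered variants pair a store-release exclusive with a trailing
 * dmb ish.
 */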

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)                                \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,               \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
                                      volatile void *ptr))              \
{                                                                       \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
        "       eor     %1, %1, %4\n"                                   \
        "       orr     %1, %0, %1\n"                                   \
        "       cbnz    %1, 2f\n"                                       \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
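/*
 * For reference: __cmpxchg_double() and __cmpxchg_double_mb() compare and
 * swap a pair of adjacent 64-bit words with a single ldxp/stxp sequence
 * and return 0 on success, or a non-zero value if either word failed the
 * comparison. A sketch of typical use:
 *
 *      if (__cmpxchg_double_mb(o1, o2, n1, n2, ptr) == 0)
 *              // both words matched and were replaced atomically
 */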

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LL_SC_H */