// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */
#include "sched.h"

/*
 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
        (MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE \
        | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
#endif

#define MEMBARRIER_CMD_BITMASK \
        (MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED \
        | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
        | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
        | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
        | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)

static void ipi_mb(void *info)
{
        smp_mb();       /* IPIs should be serializing but paranoid. */
}

static int membarrier_global_expedited(void)
{
        int cpu;
        bool fallback = false;
        cpumask_var_t tmpmask;

        if (num_online_cpus() == 1)
                return 0;

        /*
         * Matches memory barriers around rq->curr modification in
         * scheduler.
         */
        smp_mb();       /* system call entry is not a mb. */

        /*
         * Expedited membarrier commands guarantee that they won't
         * block, hence the GFP_NOWAIT allocation flag and fallback
         * implementation.
         */
        if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
                /* Fallback for OOM. */
                fallback = true;
        }

        cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct task_struct *p;

                /*
                 * Skipping the current CPU is OK even though we can be
                 * migrated at any point. The current CPU, at the point
                 * where we read raw_smp_processor_id(), is ensured to
                 * be in program order with respect to the caller
                 * thread. Therefore, we can skip this CPU from the
                 * iteration.
                 */
                if (cpu == raw_smp_processor_id())
                        continue;
                rcu_read_lock();
                p = task_rcu_dereference(&cpu_rq(cpu)->curr);
                if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
                                   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
                        if (!fallback)
                                __cpumask_set_cpu(cpu, tmpmask);
                        else
                                smp_call_function_single(cpu, ipi_mb, NULL, 1);
                }
                rcu_read_unlock();
        }
        if (!fallback) {
                preempt_disable();
                smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
                preempt_enable();
                free_cpumask_var(tmpmask);
        }
        cpus_read_unlock();
        /*
         * Memory barrier on the caller thread _after_ we finished
         * waiting for the last IPI. Matches memory barriers around
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */
        return 0;
}

static int membarrier_private_expedited(int flags)
{
        int cpu;
        bool fallback = false;
        cpumask_var_t tmpmask;

        if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
                        return -EINVAL;
                if (!(atomic_read(&current->mm->membarrier_state) &
                      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
                        return -EPERM;
        } else {
                if (!(atomic_read(&current->mm->membarrier_state) &
                      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
                        return -EPERM;
        }

        if (num_online_cpus() == 1)
                return 0;
        /*
         * Matches memory barriers around rq->curr modification in
         * scheduler.
         */
        smp_mb();       /* system call entry is not a mb. */

        /*
         * Expedited membarrier commands guarantee that they won't
         * block, hence the GFP_NOWAIT allocation flag and fallback
         * implementation.
         */
        if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
                /* Fallback for OOM. */
                fallback = true;
        }

        cpus_read_lock();
        for_each_online_cpu(cpu) {
                struct task_struct *p;

                /*
                 * Skipping the current CPU is OK even though we can be
                 * migrated at any point. The current CPU, at the point
                 * where we read raw_smp_processor_id(), is ensured to
                 * be in program order with respect to the caller
                 * thread. Therefore, we can skip this CPU from the
                 * iteration.
                 */
                if (cpu == raw_smp_processor_id())
                        continue;
                rcu_read_lock();
                p = task_rcu_dereference(&cpu_rq(cpu)->curr);
                if (p && p->mm == current->mm) {
                        if (!fallback)
                                __cpumask_set_cpu(cpu, tmpmask);
                        else
                                smp_call_function_single(cpu, ipi_mb, NULL, 1);
                }
                rcu_read_unlock();
        }
        if (!fallback) {
                preempt_disable();
                smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
                preempt_enable();
                free_cpumask_var(tmpmask);
        }
        cpus_read_unlock();
        /*
         * Memory barrier on the caller thread _after_ we finished
         * waiting for the last IPI. Matches memory barriers around
         * rq->curr modification in scheduler.
         */
        smp_mb();       /* exit from system call is not a mb */

        return 0;
}

static int membarrier_register_global_expedited(void)
{
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;

        if (atomic_read(&mm->membarrier_state) &
            MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
                return 0;
        atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
        if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
                /*
                 * For single mm user, single threaded process, we can
                 * simply issue a memory barrier after setting
                 * MEMBARRIER_STATE_GLOBAL_EXPEDITED to guarantee that
                 * no memory access following registration is reordered
                 * before registration.
                 */
                smp_mb();
        } else {
                /*
                 * For multi-mm user threads, we need to ensure all
                 * future scheduler executions will observe the new
                 * thread flag state for this mm.
                 */
                synchronize_rcu();
        }
        atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
                  &mm->membarrier_state);

        return 0;
}

static int membarrier_register_private_expedited(int flags)
{
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
        int state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY;

        if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
                        return -EINVAL;
                state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
        }

        /*
         * We need to consider threads belonging to different thread
         * groups, which use the same mm. (CLONE_VM but not
         * CLONE_THREAD).
         */
        if (atomic_read(&mm->membarrier_state) & state)
                return 0;
        atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
        if (flags & MEMBARRIER_FLAG_SYNC_CORE)
                atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
                          &mm->membarrier_state);
        if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
                /*
                 * Ensure all future scheduler executions will observe the
                 * new thread flag state for this process.
                 */
                synchronize_rcu();
        }
        atomic_or(state, &mm->membarrier_state);

        return 0;
}

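/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of the registration requirement enforced by
 * membarrier_private_expedited() and membarrier_register_private_expedited()
 * above. The private expedited commands fail with -EPERM until the process
 * has registered its intent. The wrapper name membarrier_syscall() is a
 * hypothetical helper, and error handling is omitted.
 *
 *      #include <linux/membarrier.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int membarrier_syscall(int cmd, int flags)
 *      {
 *              return syscall(__NR_membarrier, cmd, flags);
 *      }
 *
 *      Once per process, before the fast path is used:
 *              membarrier_syscall(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0);
 *
 *      Later, on the slow path; pairs with plain compiler barriers in the
 *      other threads of this process (would return -EPERM if unregistered):
 *              membarrier_syscall(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */
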
/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
 *
 * See the illustrative userspace sketch in the comment at the end of this
 * file for how these commands are typically queried and issued.
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
        if (unlikely(flags))
                return -EINVAL;
        switch (cmd) {
        case MEMBARRIER_CMD_QUERY:
        {
                int cmd_mask = MEMBARRIER_CMD_BITMASK;

                if (tick_nohz_full_enabled())
                        cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
                return cmd_mask;
        }
        case MEMBARRIER_CMD_GLOBAL:
                /* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
                if (tick_nohz_full_enabled())
                        return -EINVAL;
                if (num_online_cpus() > 1)
                        synchronize_rcu();
                return 0;
        case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
                return membarrier_global_expedited();
        case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
                return membarrier_register_global_expedited();
        case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
                return membarrier_private_expedited(0);
        case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
                return membarrier_register_private_expedited(0);
        case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
                return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
        case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
                return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
        default:
                return -EINVAL;
        }
}
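
/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of the command flow handled by sys_membarrier() above.
 * MEMBARRIER_CMD_QUERY returns the mask built from MEMBARRIER_CMD_BITMASK
 * (minus MEMBARRIER_CMD_GLOBAL on nohz_full kernels), which a process can
 * probe before registering for and issuing expedited barriers. Error
 * handling is omitted.
 *
 *      #include <linux/membarrier.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      int supported = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
 *
 *      if (supported > 0 &&
 *          (supported & MEMBARRIER_CMD_GLOBAL_EXPEDITED)) {
 *              syscall(__NR_membarrier,
 *                      MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED, 0);
 *              syscall(__NR_membarrier,
 *                      MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0);
 *      }
 */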