// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * membarrier system call
 */
#include "sched.h"
/*
 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK			\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif

#define MEMBARRIER_CMD_BITMASK						\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
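
/*
 * Illustrative note (not part of the original file): MEMBARRIER_CMD_QUERY
 * returns this bitmask (minus MEMBARRIER_CMD_GLOBAL on nohz_full kernels),
 * so userspace can probe support for a command by testing its bit, e.g.:
 *
 *	int mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
 *
 *	if (mask >= 0 && (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
 *		... command is supported ...
 */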
static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}
static void ipi_sync_rq_state(void *info)
{
	struct mm_struct *mm = (struct mm_struct *) info;

	if (current->mm != mm)
		return;
	this_cpu_write(runqueues.membarrier_state,
		       atomic_read(&mm->membarrier_state));
	/*
	 * Issue a memory barrier after setting
	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
	 * guarantee that no memory access following registration is reordered
	 * before registration.
	 */
	smp_mb();
}
void membarrier_exec_mmap(struct mm_struct *mm)
{
	/*
	 * Issue a memory barrier before clearing membarrier_state to
	 * guarantee that no memory access prior to exec is reordered after
	 * clearing this state.
	 */
	smp_mb();
	atomic_set(&mm->membarrier_state, 0);
	/*
	 * Keep the runqueue membarrier_state in sync with this mm membarrier_state.
	 */
	this_cpu_write(runqueues.membarrier_state, 0);
}
static int membarrier_global_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/* Matches memory barriers around rq->curr modification in scheduler. */
	smp_mb();	/* system call entry is not a mb. */
	/*
	 * Expedited membarrier commands guarantee that they won't block,
	 * hence the GFP_NOWAIT allocation flag and fallback implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
		    MEMBARRIER_STATE_GLOBAL_EXPEDITED))
			continue;
		/*
		 * Skip the CPU if it runs a kernel thread. The scheduler
		 * leaves the prior task mm in place as an optimization when
		 * scheduling a kthread.
		 */
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p->flags & PF_KTHREAD)
			continue;
		if (!fallback)
			__cpumask_set_cpu(cpu, tmpmask);
		else
			smp_call_function_single(cpu, ipi_mb, NULL, 1);
	}
	rcu_read_unlock();
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();
	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}
static int membarrier_private_expedited(int flags)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;
	struct mm_struct *mm = current->mm;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
	} else {
		if (!(atomic_read(&mm->membarrier_state) &
		    MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
		return 0;
	/* Matches memory barriers around rq->curr modification in scheduler. */
	smp_mb();	/* system call entry is not a mb. */
	/*
	 * Expedited membarrier commands guarantee that they won't block,
	 * hence the GFP_NOWAIT allocation flag and fallback implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p && p->mm == mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
	}
	rcu_read_unlock();
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();
	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}
static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
	int membarrier_state = atomic_read(&mm->membarrier_state);
	cpumask_var_t tmpmask;
	int cpu;

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
		this_cpu_write(runqueues.membarrier_state, membarrier_state);
		/*
		 * For single mm user, we can simply issue a memory barrier
		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
		 * mm and in the current runqueue to guarantee that no memory
		 * access following registration is reordered before
		 * registration.
		 */
		smp_mb();
		return 0;
	}
	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	/*
	 * For mm with multiple users, we need to ensure all future
	 * scheduler executions will observe @mm's new membarrier state.
	 */
	synchronize_rcu();
	/*
	 * For each cpu runqueue, if the task's mm matches @mm, ensure that all
	 * @mm's membarrier state set bits are also set in the runqueue's
	 * membarrier state. This ensures that a runqueue scheduling
	 * between threads which are users of @mm has its membarrier state
	 * updated.
	 */
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *p;

		p = rcu_dereference(rq->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();
	preempt_disable();
	smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1);
	preempt_enable();
	free_cpumask_var(tmpmask);
	cpus_read_unlock();
	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ret;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);
	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
	    ret;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
		return 0;
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);
	return 0;
}
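
/*
 * Illustrative userspace usage (a sketch, not part of the original file):
 * a process registers once, then issues expedited membarrier commands on
 * its slow path, pairing them with plain compiler barriers on the fast
 * path (see the ordering table below). Assumes <linux/membarrier.h> and
 * syscall(2); error handling is elided.
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int membarrier(int cmd, int flags)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags);
 *	}
 *
 *	...
 *	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0);
 *	...
 *	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */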

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with the flags argument set to 0,
 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
 * always return the same value until reboot. In addition, it can return
 * -ENOMEM if there is not enough memory available to perform the system
 * call.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);