// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */
#include "sched.h"

/*
 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE		\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif
#define MEMBARRIER_CMD_BITMASK						\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
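
/*
 * Example (illustrative sketch, not part of this file): MEMBARRIER_CMD_QUERY
 * returns the bitmask above so userspace can probe which commands the running
 * kernel supports. Assuming a Linux system providing <linux/membarrier.h> and
 * the membarrier syscall number, a minimal probe might look like:
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int membarrier_supports(int cmd)
 *	{
 *		long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);
 *
 *		if (mask < 0)
 *			return 0;	// syscall absent or query failed
 *		return (mask & cmd) == cmd;
 *	}
 */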

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static void ipi_sync_rq_state(void *info)
{
	struct mm_struct *mm = (struct mm_struct *) info;

	if (current->mm != mm)
		return;
	this_cpu_write(runqueues.membarrier_state,
		       atomic_read(&mm->membarrier_state));
	/*
	 * Issue a memory barrier after setting
	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
	 * guarantee that no memory access following registration is reordered
	 * before registration.
	 */
	smp_mb();
}

void membarrier_exec_mmap(struct mm_struct *mm)
{
	/*
	 * Issue a memory barrier before clearing membarrier_state to
	 * guarantee that no memory access prior to exec is reordered after
	 * clearing this state.
	 */
	smp_mb();
	atomic_set(&mm->membarrier_state, 0);
	/*
	 * Keep the runqueue membarrier_state in sync with this mm
	 * membarrier_state.
	 */
	this_cpu_write(runqueues.membarrier_state, 0);
}

static int membarrier_global_expedited(void)
{
	int cpu;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;
	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */
	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
		      MEMBARRIER_STATE_GLOBAL_EXPEDITED))
			continue;
		/*
		 * Skip the CPU if it runs a kernel thread. The scheduler
		 * leaves the prior task mm in place as an optimization when
		 * scheduling a kthread.
		 */
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p->flags & PF_KTHREAD)
			continue;
		__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static int membarrier_private_expedited(int flags)
{
	int cpu;
	cpumask_var_t tmpmask;
	struct mm_struct *mm = current->mm;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
	} else {
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
		return 0;
	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */
	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
	int membarrier_state = atomic_read(&mm->membarrier_state);
	cpumask_var_t tmpmask;
	int cpu;

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
		this_cpu_write(runqueues.membarrier_state, membarrier_state);
		/*
		 * For single mm user, we can simply issue a memory barrier
		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
		 * mm and in the current runqueue to guarantee that no memory
		 * access following registration is reordered before
		 * registration.
		 */
		smp_mb();
		return 0;
	}

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	/*
	 * For mm with multiple users, we need to ensure all future
	 * scheduler executions will observe @mm's new membarrier
	 * state.
	 */
	synchronize_rcu();

	/*
	 * For each cpu runqueue, if the task's mm matches @mm, ensure that
	 * all of @mm's membarrier state set bits are also set in the
	 * runqueue's membarrier state. This ensures that a runqueue
	 * scheduling between threads which are users of @mm has its
	 * membarrier state updated.
	 */
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *p;

		p = rcu_dereference(rq->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_sync_rq_state, mm, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ret;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
	    ret;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD.)
	 */
	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
		return 0;
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);

	return 0;
}

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
 * always return the same value until reboot. In addition, it can return
 * -ENOMEM if there is not enough memory available to perform the system
 * call.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	default:
		return -EINVAL;
	}
}
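
/*
 * Example (illustrative sketch, not part of this file): the usage pattern the
 * kernel-doc above describes is to register once per process and then pair a
 * plain compiler barrier on the fast path with an expedited membarrier command
 * on the slow path, since barrier() and sys_membarrier() order against each
 * other per the table. Assuming <linux/membarrier.h> and the membarrier
 * syscall number are available, a minimal private-expedited user might be:
 *
 *	#include <linux/membarrier.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long membarrier(int cmd, int flags)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags);
 *	}
 *
 *	int main(void)
 *	{
 *		// Register once; otherwise the expedited command fails
 *		// with EPERM.
 *		if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
 *			return 1;
 *
 *		// Fast path in other threads: a compiler barrier only,
 *		// e.g. __asm__ __volatile__("" ::: "memory");
 *
 *		// Slow path: order this thread against all other threads
 *		// of the same process.
 *		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
 *			return 1;
 *
 *		puts("expedited membarrier issued");
 *		return 0;
 *	}
 */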