2 * Sleepable Read-Copy Update mechanism for mutual exclusion.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
18 * Copyright (C) IBM Corporation, 2006
19 * Copyright (C) Fujitsu, 2012
21 * Author: Paul McKenney <paulmck@us.ibm.com>
22 * Lai Jiangshan <laijs@cn.fujitsu.com>
24 * For detailed explanation of Read-Copy Update mechanism see -
25 * Documentation/RCU/ *.txt
29 #include <linux/export.h>
30 #include <linux/mutex.h>
31 #include <linux/percpu.h>
32 #include <linux/preempt.h>
33 #include <linux/rcupdate.h>
34 #include <linux/sched.h>
35 #include <linux/smp.h>
36 #include <linux/delay.h>
37 #include <linux/srcu.h>
42 * Initialize an rcu_batch structure to empty.
44 static inline void rcu_batch_init(struct rcu_batch *b)
51 * Enqueue a callback onto the tail of the specified rcu_batch structure.
53 static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
56 b->tail = &head->next;
60 * Is the specified rcu_batch structure empty?
62 static inline bool rcu_batch_empty(struct rcu_batch *b)
64 return b->tail == &b->head;
68 * Remove the callback at the head of the specified rcu_batch structure
69 * and return a pointer to it, or return NULL if the structure is empty.
71 static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
73 struct rcu_head *head;
75 if (rcu_batch_empty(b))
80 if (b->tail == &head->next)
87 * Move all callbacks from the rcu_batch structure specified by "from" to
88 * the structure specified by "to".
90 static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
92 if (!rcu_batch_empty(from)) {
93 *to->tail = from->head;
94 to->tail = from->tail;
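/*
 * Illustrative sketch (not part of the original file): the helpers above
 * implement a simple singly-linked FIFO with a tail pointer.  The local
 * batch and the rcu_head instances below are hypothetical.
 *
 *	struct rcu_batch b;
 *	struct rcu_head h1, h2;
 *	struct rcu_head *p;
 *
 *	rcu_batch_init(&b);		// empty: b.tail == &b.head
 *	rcu_batch_queue(&b, &h1);	// h1 is both head and tail
 *	rcu_batch_queue(&b, &h2);	// h2 linked after h1
 *	p = rcu_batch_dequeue(&b);	// returns &h1, &h2 becomes the head
 *	p = rcu_batch_dequeue(&b);	// returns &h2, batch is empty again
 *	p = rcu_batch_dequeue(&b);	// returns NULL once the batch is empty
 */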
99 static int init_srcu_struct_fields(struct srcu_struct *sp)
102 spin_lock_init(&sp->queue_lock);
104 rcu_batch_init(&sp->batch_queue);
105 rcu_batch_init(&sp->batch_check0);
106 rcu_batch_init(&sp->batch_check1);
107 rcu_batch_init(&sp->batch_done);
108 INIT_DELAYED_WORK(&sp->work, process_srcu);
109 sp->per_cpu_ref = alloc_percpu(struct srcu_array);
110 return sp->per_cpu_ref ? 0 : -ENOMEM;
113 #ifdef CONFIG_DEBUG_LOCK_ALLOC
115 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
116 struct lock_class_key *key)
118 /* Don't re-initialize a lock while it is held. */
119 debug_check_no_locks_freed((void *)sp, sizeof(*sp));
120 lockdep_init_map(&sp->dep_map, name, key, 0);
121 return init_srcu_struct_fields(sp);
123 EXPORT_SYMBOL_GPL(__init_srcu_struct);
125 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
128 * init_srcu_struct - initialize a sleep-RCU structure
129 * @sp: structure to initialize.
131 * Must invoke this on a given srcu_struct before passing that srcu_struct
132 * to any other function. Each srcu_struct represents a separate domain
133 * of SRCU protection.
135 int init_srcu_struct(struct srcu_struct *sp)
137 return init_srcu_struct_fields(sp);
139 EXPORT_SYMBOL_GPL(init_srcu_struct);
141 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
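/*
 * Usage sketch (illustrative, not part of the original file): an SRCU
 * domain may be created statically with DEFINE_SRCU() or dynamically with
 * init_srcu_struct().  The names my_srcu, my_dynamic_srcu, and my_init()
 * are hypothetical.
 *
 *	#include <linux/srcu.h>
 *
 *	DEFINE_SRCU(my_srcu);			// static domain, no init call needed
 *
 *	static struct srcu_struct my_dynamic_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_dynamic_srcu);	// 0 on success, -ENOMEM on failure
 *	}
 */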
144 * Returns approximate total of the readers' ->lock_count[] values for the
145 * rank of per-CPU counters specified by idx.
147 static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
150 unsigned long sum = 0;
152 for_each_possible_cpu(cpu) {
153 struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
155 sum += READ_ONCE(cpuc->lock_count[idx]);
161 * Returns approximate total of the readers' ->unlock_count[] values for the
162 * rank of per-CPU counters specified by idx.
164 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
167 unsigned long sum = 0;
169 for_each_possible_cpu(cpu) {
170 struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
172 sum += READ_ONCE(cpuc->unlock_count[idx]);
178 * Return true if the number of pre-existing readers is determined to be zero.
181 static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
183 unsigned long unlocks;
185 unlocks = srcu_readers_unlock_idx(sp, idx);
188 * Make sure that a lock is always counted if the corresponding unlock
189 * is counted. Needs to be a smp_mb() as the read side may contain a
190 * read from a variable that is written to before the synchronize_srcu()
191 * in the write side. In this case smp_mb()s A and B act like the store-buffering pattern.
194 * This smp_mb() also pairs with smp_mb() C to prevent accesses after the
195 * synchronize_srcu() from being executed before the grace period ends.
200 * If the locks are the same as the unlocks, then there must have
201 * been no readers on this index at some time in between. This does not
202 * mean that there are no more readers, as one could have read the
203 * current index but not have incremented the lock counter yet.
205 * Possible bug: There is no guarantee that there haven't been ULONG_MAX
206 * increments of ->lock_count[] since the unlocks were counted, meaning
207 * that this could return true even if there are still active readers.
208 * Since there are no memory barriers around srcu_flip(), the CPU is not
209 * required to increment ->completed before running
210 * srcu_readers_unlock_idx(), which means that there could be an
211 * arbitrarily large number of critical sections that execute after
212 * srcu_readers_unlock_idx() but use the old value of ->completed.
214 return srcu_readers_lock_idx(sp, idx) == unlocks;
218 * srcu_readers_active - returns true if there are readers, and false otherwise
220 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
222 * Note that this is not an atomic primitive, and can therefore suffer
223 * severe errors when invoked on an active srcu_struct. That said, it
224 * can be useful as an error check at cleanup time.
226 static bool srcu_readers_active(struct srcu_struct *sp)
229 unsigned long sum = 0;
231 for_each_possible_cpu(cpu) {
232 struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);
234 sum += READ_ONCE(cpuc->lock_count[0]);
235 sum += READ_ONCE(cpuc->lock_count[1]);
236 sum -= READ_ONCE(cpuc->unlock_count[0]);
237 sum -= READ_ONCE(cpuc->unlock_count[1]);
243 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
244 * @sp: structure to clean up.
246 * Must invoke this after you are finished using a given srcu_struct that
247 * was initialized via init_srcu_struct(), else you leak memory.
249 void cleanup_srcu_struct(struct srcu_struct *sp)
251 if (WARN_ON(srcu_readers_active(sp)))
252 return; /* Leakage unless caller handles error. */
253 free_percpu(sp->per_cpu_ref);
254 sp->per_cpu_ref = NULL;
256 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
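/*
 * Teardown sketch (illustrative, not part of the original file): all
 * readers must have finished and all call_srcu() callbacks must have been
 * invoked before cleanup_srcu_struct() is called, otherwise the WARN_ON()
 * above fires and the per-CPU counters are leaked.  The names below are
 * hypothetical.
 *
 *	static void my_exit(void)
 *	{
 *		srcu_barrier(&my_dynamic_srcu);		// wait for pending callbacks
 *		cleanup_srcu_struct(&my_dynamic_srcu);	// now safe to free the domain
 *	}
 */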
259 * Counts the new reader in the appropriate per-CPU element of the
260 * srcu_struct. Must be called from process context.
261 * Returns an index that must be passed to the matching srcu_read_unlock().
263 int __srcu_read_lock(struct srcu_struct *sp)
267 idx = READ_ONCE(sp->completed) & 0x1;
268 __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
269 smp_mb(); /* B */ /* Avoid leaking the critical section. */
272 EXPORT_SYMBOL_GPL(__srcu_read_lock);
275 * Removes the count for the old reader from the appropriate per-CPU
276 * element of the srcu_struct. Note that this may well be a different
277 * CPU than that which was incremented by the corresponding srcu_read_lock().
278 * Must be called from process context.
280 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
282 smp_mb(); /* C */ /* Avoid leaking the critical section. */
283 this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
285 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
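/*
 * Reader-side sketch (illustrative, not part of the original file):
 * __srcu_read_lock() and __srcu_read_unlock() are normally reached through
 * the srcu_read_lock()/srcu_read_unlock() wrappers, which add lockdep
 * annotations.  The structure, pointer, and domain names below are
 * hypothetical.
 *
 *	struct my_data { int value; };
 *	static struct my_data __rcu *my_ptr;
 *
 *	static int my_reader(void)
 *	{
 *		int idx, val = 0;
 *		struct my_data *p;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_ptr, &my_srcu);
 *		if (p)
 *			val = p->value;			// reader may block here
 *		srcu_read_unlock(&my_srcu, idx);	// must pass back the same idx
 *		return val;
 *	}
 */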
288 * We use an adaptive strategy for synchronize_srcu() and especially for
289 * synchronize_srcu_expedited(). We spin for a fixed time period
290 * (defined below) to allow SRCU readers to exit their read-side critical
291 * sections. If there are still some readers after 10 microseconds,
292 * we repeatedly block for 1-millisecond time periods. This approach
293 * has done well in testing, so there is no need for a config parameter.
295 #define SRCU_RETRY_CHECK_DELAY 5
296 #define SYNCHRONIZE_SRCU_TRYCOUNT 2
297 #define SYNCHRONIZE_SRCU_EXP_TRYCOUNT 12
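/*
 * Worked numbers for the definitions above (derived from try_check_zero()
 * below, not new behavior): consecutive re-checks of the reader counts are
 * separated by udelay(SRCU_RETRY_CHECK_DELAY), i.e. 5 microseconds, and a
 * full grace period must drain both index ranks.  With the normal trycount
 * of 2 that is roughly 2 * 5 = 10 microseconds of spinning in total, which
 * is the "10 microseconds" mentioned above; the expedited trycount of 12
 * permits up to 11 * 5 = 55 microseconds of spinning per index before the
 * state machine gives up and retries later from the workqueue.
 */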
300 * Wait until all pre-existing readers complete.  Such readers
301 * will have used the index specified by "idx".
302 * The caller must ensure that ->completed is not changed while checking,
303 * and that idx == (->completed & 1) ^ 1.
305 static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
308 if (srcu_readers_active_idx_check(sp, idx))
312 udelay(SRCU_RETRY_CHECK_DELAY);
317 * Increment the ->completed counter so that future SRCU readers will
318 * use the other rank of the ->(un)lock_count[] arrays. This allows
319 * us to wait for pre-existing readers in a starvation-free manner.
321 static void srcu_flip(struct srcu_struct *sp)
327 * Enqueue an SRCU callback on the specified srcu_struct structure,
328 * initiating grace-period processing if it is not already running.
330 * Note that all CPUs must agree that the grace period extended beyond
331 * all pre-existing SRCU read-side critical sections. On systems with
332 * more than one CPU, this means that when "func()" is invoked, each CPU
333 * is guaranteed to have executed a full memory barrier since the end of
334 * its last corresponding SRCU read-side critical section whose beginning
335 * preceded the call to call_srcu(). It also means that each CPU executing
336 * an SRCU read-side critical section that continues beyond the start of
337 * "func()" must have executed a memory barrier after the call_rcu()
338 * but before the beginning of that SRCU read-side critical section.
339 * Note that these guarantees include CPUs that are offline, idle, or
340 * executing in user mode, as well as CPUs that are executing in the kernel.
342 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
343 * resulting SRCU callback function "func()", then both CPU A and CPU
344 * B are guaranteed to execute a full memory barrier during the time
345 * interval between the call to call_srcu() and the invocation of "func()".
346 * This guarantee applies even if CPU A and CPU B are the same CPU (but
347 * again only if the system has more than one CPU).
349 * Of course, these guarantees apply only for invocations of call_srcu(),
350 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
351 * srcu_struct structure.
353 void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
360 spin_lock_irqsave(&sp->queue_lock, flags);
361 smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
362 rcu_batch_queue(&sp->batch_queue, head);
365 queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
367 spin_unlock_irqrestore(&sp->queue_lock, flags);
369 EXPORT_SYMBOL_GPL(call_srcu);
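/*
 * call_srcu() usage sketch (illustrative, not part of the original file):
 * the rcu_head is typically embedded in the structure being retired, and
 * the callback runs only after a full SRCU grace period for that domain.
 * The names below are hypothetical.
 *
 *	struct my_data {
 *		int value;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	static void my_retire(struct my_data *old)
 *	{
 *		call_srcu(&my_srcu, &old->rh, my_free_cb);	// does not block
 *	}
 */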
371 static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
372 static void srcu_reschedule(struct srcu_struct *sp);
375 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
377 static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
379 struct rcu_synchronize rcu;
380 struct rcu_head *head = &rcu.head;
383 RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
384 lock_is_held(&rcu_bh_lock_map) ||
385 lock_is_held(&rcu_lock_map) ||
386 lock_is_held(&rcu_sched_lock_map),
387 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
390 init_completion(&rcu.completion);
393 head->func = wakeme_after_rcu;
394 spin_lock_irq(&sp->queue_lock);
395 smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
397 /* Steal ownership of grace-period processing from the workqueue. */
399 rcu_batch_queue(&sp->batch_check0, head);
400 spin_unlock_irq(&sp->queue_lock);
402 srcu_advance_batches(sp, trycount);
403 if (!rcu_batch_empty(&sp->batch_done)) {
404 BUG_ON(sp->batch_done.head != head);
405 rcu_batch_dequeue(&sp->batch_done);
408 /* Hand ownership of grace-period processing back to the workqueue. */
411 rcu_batch_queue(&sp->batch_queue, head);
412 spin_unlock_irq(&sp->queue_lock);
416 wait_for_completion(&rcu.completion);
417 smp_mb(); /* Caller's later accesses after GP. */
423 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
424 * @sp: srcu_struct with which to synchronize.
426 * Wait for the counts of both indexes to drain to zero.  To avoid
427 * starving synchronize_srcu(), it first waits for the count of the
428 * index ((->completed & 1) ^ 1) to drain to zero, then flips
429 * ->completed and waits for the count of the other index to drain.
431 * Can block; must be called from process context.
433 * Note that it is illegal to call synchronize_srcu() from the corresponding
434 * SRCU read-side critical section; doing so will result in deadlock.
435 * However, it is perfectly legal to call synchronize_srcu() on one
436 * srcu_struct from some other srcu_struct's read-side critical section,
437 * as long as the resulting graph of srcu_structs is acyclic.
439 * There are memory-ordering constraints implied by synchronize_srcu().
440 * On systems with more than one CPU, when synchronize_srcu() returns,
441 * each CPU is guaranteed to have executed a full memory barrier since
442 * the end of its last corresponding SRCU read-side critical section
443 * whose beginning preceded the call to synchronize_srcu(). In addition,
444 * each CPU having an SRCU read-side critical section that extends beyond
445 * the return from synchronize_srcu() is guaranteed to have executed a
446 * full memory barrier after the beginning of synchronize_srcu() and before
447 * the beginning of that SRCU read-side critical section. Note that these
448 * guarantees include CPUs that are offline, idle, or executing in user mode,
449 * as well as CPUs that are executing in the kernel.
451 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
452 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
453 * to have executed a full memory barrier during the execution of
454 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
455 * are the same CPU, but again only if the system has more than one CPU.
457 * Of course, these memory-ordering guarantees apply only when
458 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
459 * passed the same srcu_struct structure.
461 void synchronize_srcu(struct srcu_struct *sp)
463 __synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
464 ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
465 : SYNCHRONIZE_SRCU_TRYCOUNT);
467 EXPORT_SYMBOL_GPL(synchronize_srcu);
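/*
 * Update-side sketch (illustrative, not part of the original file):
 * publish a new version of an SRCU-protected pointer, wait for pre-existing
 * readers with synchronize_srcu(), then free the old version.  The names
 * below are hypothetical, and my_lock is assumed to serialize updaters.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	static void my_update(struct my_data *new)
 *	{
 *		struct my_data *old;
 *
 *		spin_lock(&my_lock);
 *		old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *		rcu_assign_pointer(my_ptr, new);
 *		spin_unlock(&my_lock);
 *
 *		synchronize_srcu(&my_srcu);	// wait for readers that might still see old
 *		kfree(old);
 *	}
 */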
470 * synchronize_srcu_expedited - Brute-force SRCU grace period
471 * @sp: srcu_struct with which to synchronize.
473 * Wait for an SRCU grace period to elapse, but be more aggressive about
474 * spinning rather than blocking when waiting.
476 * Note that synchronize_srcu_expedited() has the same deadlock and
477 * memory-ordering properties as does synchronize_srcu().
479 void synchronize_srcu_expedited(struct srcu_struct *sp)
481 __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
483 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
486 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
487 * @sp: srcu_struct on which to wait for in-flight callbacks.
489 void srcu_barrier(struct srcu_struct *sp)
491 synchronize_srcu(sp);
493 EXPORT_SYMBOL_GPL(srcu_barrier);
496 * srcu_batches_completed - return batches completed.
497 * @sp: srcu_struct on which to report batch completion.
499 * Report the number of batches, correlated with, but not necessarily
500 * precisely the same as, the number of grace periods that have elapsed.
502 unsigned long srcu_batches_completed(struct srcu_struct *sp)
504 return sp->completed;
506 EXPORT_SYMBOL_GPL(srcu_batches_completed);
508 #define SRCU_CALLBACK_BATCH 10
509 #define SRCU_INTERVAL 1
512 * Move any new SRCU callbacks to the first stage of the SRCU grace-period pipeline.
515 static void srcu_collect_new(struct srcu_struct *sp)
517 if (!rcu_batch_empty(&sp->batch_queue)) {
518 spin_lock_irq(&sp->queue_lock);
519 rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
520 spin_unlock_irq(&sp->queue_lock);
525 * Core SRCU state machine. Advance callbacks from ->batch_check0 to
526 * ->batch_check1 and then to ->batch_done as readers drain.
528 static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
530 int idx = 1 ^ (sp->completed & 1);
533 * Because readers might be delayed for an extended period after
534 * fetching ->completed for their index, at any point in time there
535 * might well be readers using both idx=0 and idx=1. We therefore
536 * need to wait for readers to clear from both index values before
537 * invoking a callback.
540 if (rcu_batch_empty(&sp->batch_check0) &&
541 rcu_batch_empty(&sp->batch_check1))
542 return; /* no callbacks need to be advanced */
544 if (!try_check_zero(sp, idx, trycount))
545 return; /* failed to advance, will try after SRCU_INTERVAL */
548 * The callbacks in ->batch_check1 already had their first zero check,
549 * and the subsequent flip, back when they were enqueued on
550 * ->batch_check0 in a previous invocation of srcu_advance_batches().
551 * (Presumably try_check_zero() returned false during that
552 * invocation, leaving the callbacks stranded on ->batch_check1.)
553 * They are therefore ready to invoke, so move them to ->batch_done.
555 rcu_batch_move(&sp->batch_done, &sp->batch_check1);
557 if (rcu_batch_empty(&sp->batch_check0))
558 return; /* no callbacks need to be advanced */
559 srcu_flip(sp);
562 * The callbacks in ->batch_check0 just finished their
563 * first zero check and flip, so move them to ->batch_check1
564 * for future checking on the other idx.
566 rcu_batch_move(&sp->batch_check1, &sp->batch_check0);
569 * SRCU read-side critical sections are normally short, so check
570 * at least twice in quick succession after a flip.
572 trycount = trycount < 2 ? 2 : trycount;
573 if (!try_check_zero(sp, idx^1, trycount))
574 return; /* failed to advance, will try after SRCU_INTERVAL */
577 * The callbacks in ->batch_check1 have now waited for all
578 * pre-existing readers using both idx values. They are therefore
579 * ready to invoke, so move them to ->batch_done.
581 rcu_batch_move(&sp->batch_done, &sp->batch_check1);
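/*
 * Pipeline summary (derived from the code above, for orientation only):
 * a callback queued by call_srcu() advances through the batches as
 *
 *	call_srcu()      -->  ->batch_queue
 *	->batch_queue    -->  ->batch_check0   srcu_collect_new()
 *	->batch_check0   -->  ->batch_check1   first try_check_zero() + srcu_flip()
 *	->batch_check1   -->  ->batch_done     second try_check_zero()
 *
 * after which srcu_invoke_callbacks() below invokes it.
 */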
585 * Invoke a limited number of SRCU callbacks that have passed through
586 * their grace period. If there are more to do, SRCU will reschedule
587 * the workqueue. Note that needed memory barriers have been executed
588 * in this task's context by srcu_readers_active_idx_check().
590 static void srcu_invoke_callbacks(struct srcu_struct *sp)
593 struct rcu_head *head;
595 for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
596 head = rcu_batch_dequeue(&sp->batch_done);
606 * Finished one round of SRCU grace-period processing. Start another if there are
607 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
609 static void srcu_reschedule(struct srcu_struct *sp)
613 if (rcu_batch_empty(&sp->batch_done) &&
614 rcu_batch_empty(&sp->batch_check1) &&
615 rcu_batch_empty(&sp->batch_check0) &&
616 rcu_batch_empty(&sp->batch_queue)) {
617 spin_lock_irq(&sp->queue_lock);
618 if (rcu_batch_empty(&sp->batch_done) &&
619 rcu_batch_empty(&sp->batch_check1) &&
620 rcu_batch_empty(&sp->batch_check0) &&
621 rcu_batch_empty(&sp->batch_queue)) {
625 spin_unlock_irq(&sp->queue_lock);
629 queue_delayed_work(system_power_efficient_wq,
630 &sp->work, SRCU_INTERVAL);
634 * This is the work-queue function that handles SRCU grace periods.
636 void process_srcu(struct work_struct *work)
638 struct srcu_struct *sp;
640 sp = container_of(work, struct srcu_struct, work.work);
642 srcu_collect_new(sp);
643 srcu_advance_batches(sp, 1);
644 srcu_invoke_callbacks(sp);
647 EXPORT_SYMBOL_GPL(process_srcu);