/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>
struct rcu_dynticks;

/* Tiny RCU does not track dyntick state, so the snapshot is always zero. */
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}
static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}
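/*
 * Usage sketch (editorial illustration, not part of the original header;
 * example_update() and do_other_work() are hypothetical names): the
 * polling API above lets an updater snapshot the grace-period state, do
 * unrelated work, and then block in cond_synchronize_rcu() only if a
 * full grace period has not already elapsed since the snapshot.
 */
#if 0
static void example_update(void)
{
	unsigned long oldstate;

	oldstate = get_state_synchronize_rcu();	/* Snapshot the GP state. */
	do_other_work();			/* Hypothetical overlapping work. */
	cond_synchronize_rcu(oldstate);		/* Wait only if still needed. */
}
#endif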
static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}
static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
}
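/*
 * Usage sketch (editorial illustration, not part of the original header;
 * example_cleanup() and example_cache are hypothetical names):
 * rcu_barrier() waits for all previously queued RCU callbacks to be
 * invoked, which is the usual guard before tearing down state those
 * callbacks may still reference, e.g. on module unload.
 */
#if 0
static void example_cleanup(void)
{
	rcu_barrier();				/* Drain pending call_rcu() callbacks. */
	kmem_cache_destroy(example_cache);	/* Now safe to destroy the cache. */
}
#endif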
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}
static inline void kfree_call_rcu(struct rcu_head *head,
				  rcu_callback_t func)
{
	call_rcu(head, func);
}
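/*
 * Usage sketch (editorial illustration, not part of the original header;
 * struct example_node, example_free_cb(), and example_remove() are
 * hypothetical): the classic RCU update pattern, unlinking an element
 * and deferring its kfree() until a grace period has elapsed.  In real
 * code kfree_call_rcu() is normally reached through the kfree_rcu()
 * macro; an explicit callback is used here for clarity.
 */
#if 0
struct example_node {
	struct list_head list;
	struct rcu_head rcu;
	int data;
};

static void example_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct example_node, rcu));
}

static void example_remove(struct example_node *p)
{
	list_del_rcu(&p->list);			/* Readers may still hold references. */
	kfree_call_rcu(&p->rcu, example_free_cb);	/* Free after a grace period. */
}
#endif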
static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}
/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}
/* Return the number of grace periods started. */
static inline unsigned long rcu_batches_started(void) { return 0; }

/* Return the number of bottom-half grace periods started. */
static inline unsigned long rcu_batches_started_bh(void) { return 0; }

/* Return the number of sched grace periods started. */
static inline unsigned long rcu_batches_started_sched(void) { return 0; }

/* Return the number of grace periods completed. */
static inline unsigned long rcu_batches_completed(void) { return 0; }

/* Return the number of bottom-half grace periods completed. */
static inline unsigned long rcu_batches_completed_bh(void) { return 0; }

/* Return the number of sched grace periods completed. */
static inline unsigned long rcu_batches_completed_sched(void) { return 0; }

/* Return the number of expedited grace periods completed. */
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }

/* Return the number of expedited sched grace periods completed. */
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
/* No-op stubs: with only one CPU, Tiny RCU needs none of this machinery. */
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

static inline bool rcu_is_watching(void)
{
	return __rcu_is_watching();
}

#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

static inline bool rcu_is_watching(void)
{
	return true;
}

#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
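/*
 * Usage sketch (editorial illustration, not part of the original header;
 * example_trace_hook() is a hypothetical name): low-level code such as
 * tracing hooks can run while RCU is not watching the CPU (e.g. from the
 * idle loop), and can use rcu_is_watching() to decide whether entering
 * an RCU read-side critical section is legal.
 */
#if 0
static void example_trace_hook(void)
{
	if (!rcu_is_watching())
		return;		/* RCU not watching; rcu_read_lock() would be illegal. */

	rcu_read_lock();
	/* ... access RCU-protected data ... */
	rcu_read_unlock();
}
#endif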
static inline void rcu_all_qs(void)
{
	barrier(); /* Avoid RCU read-side critical sections leaking across. */
}
/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
#endif /* __LINUX_TINY_H */