/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 *  Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current cpu.
 *                       (a short usage sketch follows this header)
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep the clock within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 */
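
/*
 * A minimal usage sketch (an illustration, not an interface defined here):
 * timing a short section of code with local_clock().  Both timestamps are
 * taken on the same CPU (preemption disabled), since per the warning above
 * comparing values from different CPUs can go backwards:
 *
 *	u64 t0, t1;
 *
 *	preempt_disable();
 *	t0 = local_clock();
 *	do_something();		// hypothetical work being timed
 *	t1 = local_clock();
 *	preempt_enable();
 *	pr_info("took %llu ns\n", (unsigned long long)(t1 - t0));
 */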
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/tick.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
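
/*
 * For scale (an illustration, not taken from this file): the jiffies-based
 * fallback above only advances once per tick, so with HZ == 250 its
 * effective resolution is NSEC_PER_SEC / HZ == 4,000,000 ns (4 ms).
 * Architectures that want real nanosecond resolution override this weak
 * function with a cycle-counter based implementation (e.g. the TSC on x86).
 */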
__read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	sched_clock_running = 1;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early;

/*
 * We want: ktime_get_ns() + gtod_offset == sched_clock() + raw_offset
 */
static __read_mostly u64 raw_offset;
static __read_mostly u64 gtod_offset;
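
/*
 * A short derivation of the invariant above (a sketch, assuming the tick_*
 * values are the ones sampled in sched_clock_tick() below): substituting
 * tick_gtod for ktime_get_ns() and tick_raw for sched_clock(), choosing
 *
 *	raw_offset = (tick_gtod + gtod_offset) - tick_raw
 *
 * gives tick_raw + raw_offset == tick_gtod + gtod_offset, so the stable
 * fast path (sched_clock() + raw_offset) takes over right where the
 * filtered GTOD-based clock left off; __set_sched_clock_stable() computes
 * exactly this.
 */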
struct sched_clock_data {
	u64	tick_raw;	/* sched_clock() at the last tick */
	u64	tick_gtod;	/* ktime_get_ns() at the last tick */
	u64	clock;		/* last computed clock value */
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}
int sched_clock_stable(void)
{
	return static_branch_likely(&__sched_clock_stable);
}
static void __set_sched_clock_stable(void)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the (initial) unstable->stable transition continuous.
	 */
	raw_offset = (scd->tick_gtod + gtod_offset) - (scd->tick_raw);

	printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw, raw_offset);

	static_branch_enable(&__sched_clock_stable);
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
void set_sched_clock_stable(void)
{
	__sched_clock_stable_early = 1;

	smp_mb(); /* matches sched_clock_init_late() */

	/*
	 * This really should only be called early (before
	 * sched_clock_init_late()) when guesstimating that our
	 * sched_clock() is solid.
	 *
	 * After that we test stability and we can negate our guess using
	 * clear_sched_clock_stable(), possibly from a watchdog.
	 */
	if (WARN_ON_ONCE(sched_clock_running == 2))
		__set_sched_clock_stable();
}
static void __clear_sched_clock_stable(struct work_struct *work)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is late by definition. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw, raw_offset);

	static_branch_disable(&__sched_clock_stable);
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
void clear_sched_clock_stable(void)
{
	__sched_clock_stable_early = 0;

	smp_mb(); /* matches sched_clock_init_late() */

	if (sched_clock_running == 2)
		schedule_work(&sched_clock_work);
}
void sched_clock_init_late(void)
{
	sched_clock_running = 2;
	/*
	 * Ensure that it is impossible to not do a static_key update.
	 *
	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
	 * and do the update, or we must see their __sched_clock_stable_early
	 * and do the update, or both.
	 */
	smp_mb(); /* matches {set,clear}_sched_clock_stable() */

	if (__sched_clock_stable_early)
		__set_sched_clock_stable();
	else
		__clear_sched_clock_stable(NULL);
}
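
/*
 * Illustration of the ordering argument above (a sketch): this is the
 * classic store-buffering pattern.  Here we do
 *	A: sched_clock_running = 2;  smp_mb();  B: read __sched_clock_stable_early
 * while {set,clear}_sched_clock_stable() do
 *	C: write __sched_clock_stable_early;  smp_mb();  D: read sched_clock_running
 * With full barriers on both sides it cannot happen that B misses C *and*
 * D misses A: either D sees sched_clock_running == 2 and the caller flips
 * the static key itself, or B sees the caller's __sched_clock_stable_early
 * value and the code above flips it.  Either way the key gets updated.
 */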
/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
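
/*
 * Worked example (an illustration, not from the original source): the
 * comparison is done on the signed difference, so values stay correctly
 * ordered across a u64 wrap as long as they are within 2^63 of each other.
 * E.g. wrap_max(1, ULLONG_MAX) == 1, because (s64)(1 - ULLONG_MAX) == 2 > 0;
 * the value just past the wrap is treated as the later one.
 */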
/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;
	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
	clock = scd->tick_gtod + gtod_offset + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
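
/*
 * Numeric illustration of the window above (a sketch, with HZ == 1000 so
 * TICK_NSEC is roughly 1,000,000 ns): if tick_gtod was T at the last tick
 * and the raw clock then claims a wild 50 ms delta, the result is clamped
 * to at most max(old_clock, T + TICK_NSEC), i.e. the clock can only run
 * about one tick ahead of the last GTOD timestamp until the next tick
 * re-anchors it.  Likewise it is never allowed below
 * max(tick_gtod, old_clock), which filters out backward motion.
 */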
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values read
	 * to couple the two clocks: we take the
	 * larger time as the latest time for both
	 * runqueues. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
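
/*
 * Worked example of the coupling above (illustrative numbers): if the local
 * clock reads 1000 and the remote scd->clock reads 900, the remote value is
 * advanced to 1000; if instead the remote value were 1100, the local
 * my_scd->clock would be advanced to 1100.  Either way both CPUs end up at
 * the larger of the two values, so a subsequent read on either CPU will not
 * return something smaller than the value just handed out here.
 */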
/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable())
		return sched_clock() + raw_offset;

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable_notrace();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Update these values even if sched_clock_stable(), because it can
	 * become unstable at any point in time at which point we need some
	 * values to fall back on.
	 *
	 * XXX arguably we can skip this if we expose tsc_clocksource_reliable
	 */
	scd = this_scd();
	scd->tick_raw = sched_clock();
	scd->tick_gtod = ktime_get_ns();

	if (!sched_clock_stable() && likely(sched_clock_running))
		sched_clock_local(scd);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 * On a guest this value should be local_clock minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock.
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
	return local_clock();
}
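
/*
 * A hypothetical override sketch (illustrative only; hv_suspended_ns() is a
 * made-up name, not an existing API): a paravirtualized architecture could
 * provide
 *
 *	u64 running_clock(void)
 *	{
 *		return local_clock() - hv_suspended_ns();
 *	}
 *
 * where hv_suspended_ns() would return the total time the hypervisor kept
 * this guest de-scheduled, giving a clock that only advances while the
 * guest is actually running.
 */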