Merge branch 'linus' into timers/core
author Thomas Gleixner <tglx@linutronix.de>
Tue, 19 May 2015 14:12:32 +0000 (16:12 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 19 May 2015 14:12:32 +0000 (16:12 +0200)
Make sure the upstream fixes are applied before adding further
modifications.

49 files changed:
Kbuild
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
drivers/clocksource/asm9260_timer.c
drivers/clocksource/timer-integrator-ap.c
drivers/clocksource/timer-sun5i.c
drivers/power/reset/ltc2952-poweroff.c
include/linux/alarmtimer.h
include/linux/clocksource.h
include/linux/hrtimer.h
include/linux/interrupt.h
include/linux/jiffies.h
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/rcutree.h
include/linux/timekeeper_internal.h
include/linux/timer.h
include/linux/timerqueue.h
kernel/events/core.c
kernel/futex.c
kernel/locking/rtmutex.c
kernel/rcu/tree_plugin.h
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/time/Makefile
kernel/time/alarmtimer.c
kernel/time/hrtimer.c
kernel/time/posix-timers.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/time/time.c
kernel/time/timeconst.bc
kernel/time/timekeeping.c
kernel/time/timekeeping.h
kernel/time/timer.c
kernel/time/timer_list.c
lib/timerqueue.c
net/core/pktgen.c
net/sched/sch_api.c
sound/core/hrtimer.c
sound/drivers/pcsp/pcsp.c

diff --git a/Kbuild b/Kbuild
index 6f0d82a9245d897c89a63819e857f474ab93066e..df99a5f53beb880482871e99453bf04ef2f0fb06 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -2,8 +2,9 @@
 # Kbuild for top-level directory of the kernel
 # This file takes care of the following:
 # 1) Generate bounds.h
-# 2) Generate asm-offsets.h (may need bounds.h)
-# 3) Check for missing system calls
+# 2) Generate timeconst.h
+# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
+# 4) Check for missing system calls
 
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
@@ -47,7 +48,26 @@ $(obj)/$(bounds-file): kernel/bounds.s FORCE
        $(call filechk,offsets,__LINUX_BOUNDS_H__)
 
 #####
-# 2) Generate asm-offsets.h
+# 2) Generate timeconst.h
+
+timeconst-file := include/generated/timeconst.h
+
+#always  += $(timeconst-file)
+targets += $(timeconst-file)
+
+quiet_cmd_gentimeconst = GEN     $@
+define cmd_gentimeconst
+       (echo $(CONFIG_HZ) | bc -q $< ) > $@
+endef
+define filechk_gentimeconst
+       (echo $(CONFIG_HZ) | bc -q $< )
+endef
+
+$(obj)/$(timeconst-file): kernel/time/timeconst.bc FORCE
+       $(call filechk,gentimeconst)
+
+#####
+# 3) Generate asm-offsets.h
 #
 
 offsets-file := include/generated/asm-offsets.h
@@ -57,7 +77,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 
 # We use internal kbuild rules to avoid the "is up to date" message from make
 arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
-                                      $(obj)/$(bounds-file) FORCE
+                                      $(obj)/$(timeconst-file) $(obj)/$(bounds-file) FORCE
        $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
@@ -65,7 +85,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
        $(call filechk,offsets,__ASM_OFFSETS_H__)
 
 #####
-# 3) Check for missing system calls
+# 4) Check for missing system calls
 #
 
 always += missing-syscalls
@@ -77,5 +97,5 @@ quiet_cmd_syscalls = CALL    $<
 missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
        $(call cmd,syscalls)
 
-# Keep these two files during make clean
-no-clean-files := $(bounds-file) $(offsets-file)
+# Keep these three files during make clean
+no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 358c54ad20d4084db807a05ae49def561aa4dd32..5cbd4e64feb582d927c6751914757a6784d68a60 100644 (file)
@@ -204,9 +204,8 @@ static u64 rapl_event_update(struct perf_event *event)
 
 static void rapl_start_hrtimer(struct rapl_pmu *pmu)
 {
-       __hrtimer_start_range_ns(&pmu->hrtimer,
-                       pmu->timer_interval, 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
+                    HRTIMER_MODE_REL_PINNED);
 }
 
 static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index c635b8b49e931e7926efc3dc96475a8c577958e0..7c411f0e58fd5fa0eb7368dd4bf9100c9eae3cbe 100644 (file)
@@ -233,9 +233,8 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
 
 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
 {
-       __hrtimer_start_range_ns(&box->hrtimer,
-                       ns_to_ktime(box->hrtimer_duration), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 2c9c993727c85a1975b845d3fe7b49d3115def97..4c2ba59897e84bda7c12368874e3c8f7716ae70c 100644 (file)
@@ -178,7 +178,7 @@ static void __init asm9260_timer_init(struct device_node *np)
        unsigned long rate;
 
        priv.base = of_io_request_and_map(np, 0, np->name);
-       if (!priv.base)
+       if (IS_ERR(priv.base))
                panic("%s: unable to map resource", np->name);
 
        clk = of_clk_get(np, 0);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index b9efd30513d56214612913dfc0f0a30b9464ea70..c97d1980c0f856f37b3f264d6b525f3a9e68540a 100644 (file)
@@ -166,7 +166,7 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
        struct device_node *sec_node;
 
        base = of_io_request_and_map(node, 0, "integrator-timer");
-       if (!base)
+       if (IS_ERR(base))
                return;
 
        clk = of_clk_get(node, 0);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 28aa4b7bb6020c416974ec52c8f80eb12366e705..0ffb4ea7c9253eb883afe47cd9682722f5c588c6 100644 (file)
@@ -324,7 +324,7 @@ static void __init sun5i_timer_init(struct device_node *node)
        int irq;
 
        timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!timer_base)
+       if (IS_ERR(timer_base))
                panic("Can't map registers");
 
        irq = irq_of_parse_and_map(node, 0);
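The three driver hunks above all fix the same bug: of_io_request_and_map() reports failure with an ERR_PTR() encoded pointer, never NULL, so the old "if (!base)" test could not catch a failed mapping. A minimal sketch of the corrected pattern, with illustrative names ("my_timer_map", "my-timer") that are not part of the patch:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/of_address.h>

static int __init my_timer_map(struct device_node *np)
{
	void __iomem *base;

	/* returns an ERR_PTR() on failure, so test with IS_ERR() */
	base = of_io_request_and_map(np, 0, "my-timer");
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the timer through "base" ... */
	return 0;
}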
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 1e08195551fe7d505511a6dd92b07d1ab4300911..5f855f99bdfcdde73bef8e039f6820e6c256a92e 100644 (file)
@@ -158,7 +158,6 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id)
                              HRTIMER_MODE_REL);
        } else {
                hrtimer_cancel(&data->timer_trigger);
-               /* omitting return value check, timer should have been valid */
        }
        return IRQ_HANDLED;
 }
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index a899402a5a0e6325c0b3dce2f03c7dca06846238..52f3b7da4f2d4dbb181aa1fe5c8d73e4df8f1814 100644 (file)
@@ -43,8 +43,8 @@ struct alarm {
 
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
                enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
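As a hedged illustration of the prototype change above (the helper and variable names below are made up, not from the patch): with alarm_start() and alarm_start_relative() returning void, callers simply arm the alarm instead of propagating a status from the underlying hrtimer_start(), which also becomes void in this series.

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static void arm_my_alarm(struct alarm *my_alarm, ktime_t fire_time)
{
	/* was: ret = alarm_start(my_alarm, fire_time); if (ret < 0) ... */
	alarm_start(my_alarm, fire_time);
}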
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d27d0152271f9e8b487a48a9f2d74f51fe9a58a5..278dd279a7a8035e8be073a9664ea88f7357984a 100644 (file)
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 
 extern int clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 05f6df1fdf5bbfc70880f188c40e61264f764cc7..470d876c2edafd9d4fa0cffb9f599fbc514c2c90 100644 (file)
@@ -130,6 +130,12 @@ struct hrtimer_sleeper {
        struct task_struct *task;
 };
 
+#ifdef CONFIG_64BIT
+# define HRTIMER_CLOCK_BASE_ALIGN      64
+#else
+# define HRTIMER_CLOCK_BASE_ALIGN      32
+#endif
+
 /**
  * struct hrtimer_clock_base - the timer base for a specific clock
  * @cpu_base:          per cpu clock base
@@ -137,9 +143,7 @@ struct hrtimer_sleeper {
  *                     timer to a base on another cpu.
  * @clockid:           clock id for per_cpu support
  * @active:            red black tree root node for the active timers
- * @resolution:                the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
- * @softirq_time:      the time when running the hrtimer queue in the softirq
  * @offset:            offset of this clock to the monotonic base
  */
 struct hrtimer_clock_base {
@@ -147,11 +151,9 @@ struct hrtimer_clock_base {
        int                     index;
        clockid_t               clockid;
        struct timerqueue_head  active;
-       ktime_t                 resolution;
        ktime_t                 (*get_time)(void);
-       ktime_t                 softirq_time;
        ktime_t                 offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
 
 enum  hrtimer_base_type {
        HRTIMER_BASE_MONOTONIC,
@@ -167,9 +169,10 @@ enum  hrtimer_base_type {
  *                     and timers
  * @cpu:               cpu number
  * @active_bases:      Bitfield to mark bases with active timers
- * @clock_was_set:     Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
+ * @next_timer:                Pointer to the first expiring timer
  * @in_hrtirq:         hrtimer_interrupt() is currently executing
  * @hres_active:       State of high resolution mode
  * @hang_detected:     The last hrtimer interrupt detected a hang
@@ -178,27 +181,34 @@ enum  hrtimer_base_type {
  * @nr_hangs:          Total number of hrtimer interrupt hangs
  * @max_hang_time:     Maximum time spent in hrtimer_interrupt
  * @clock_base:                array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ *      Do not dereference the pointer because it is not reliable on
+ *      cross cpu removals.
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
        unsigned int                    cpu;
        unsigned int                    active_bases;
-       unsigned int                    clock_was_set;
+       unsigned int                    clock_was_set_seq;
 #ifdef CONFIG_HIGH_RES_TIMERS
+       unsigned int                    in_hrtirq       : 1,
+                                       hres_active     : 1,
+                                       hang_detected   : 1;
        ktime_t                         expires_next;
-       int                             in_hrtirq;
-       int                             hres_active;
-       int                             hang_detected;
-       unsigned long                   nr_events;
-       unsigned long                   nr_retries;
-       unsigned long                   nr_hangs;
-       ktime_t                         max_hang_time;
+       struct hrtimer                  *next_timer;
+       unsigned int                    nr_events;
+       unsigned int                    nr_retries;
+       unsigned int                    nr_hangs;
+       unsigned int                    max_hang_time;
 #endif
        struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
+       BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
        timer->node.expires = time;
        timer->_softexpires = time;
 }
@@ -262,19 +272,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
        return ktime_sub(timer->node.expires, timer->base->get_time());
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
 static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 {
        return timer->base->get_time();
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return timer->base->cpu_base->hres_active;
@@ -295,21 +302,16 @@ extern void hrtimer_peek_ahead_timers(void);
 
 extern void clock_was_set_delayed(void);
 
+extern unsigned int hrtimer_resolution;
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_LOW_RES
 
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution     LOW_RES_NSEC
 
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
-       return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
 
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
@@ -353,39 +355,45 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 #endif
 
 /* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
-                        const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                        unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                        unsigned long delta_ns,
-                        const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+                                const enum hrtimer_mode mode)
+{
+       hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
-static inline int hrtimer_start_expires(struct hrtimer *timer,
-                                               enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+                                        enum hrtimer_mode mode)
 {
        unsigned long delta;
        ktime_t soft, hard;
        soft = hrtimer_get_softexpires(timer);
        hard = hrtimer_get_expires(timer);
        delta = ktime_to_ns(ktime_sub(hard, soft));
-       return hrtimer_start_range_ns(timer, soft, delta, mode);
+       hrtimer_start_range_ns(timer, soft, delta, mode);
 }
 
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
 {
-       return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 /* Query timers: */
 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 
-extern ktime_t hrtimer_get_next_event(void);
+extern u64 hrtimer_get_next_event(void);
 
 /*
  * A timer is active, when it is enqueued into the rbtree or the
@@ -418,7 +426,22 @@ static inline int hrtimer_callback_running(struct hrtimer *timer)
 extern u64
 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer:     hrtimer to forward
+ * @interval:  the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
 static inline u64 hrtimer_forward_now(struct hrtimer *timer,
                                      ktime_t interval)
 {
@@ -443,7 +466,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
 
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
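A small sketch of how the reworked hrtimer API above is used, with illustrative names ("my_periodic_cb", "arm_my_timer"): hrtimer_start() is now a void inline wrapper around hrtimer_start_range_ns(), and, as the new hrtimer_forward_now() kernel-doc explains, a periodic callback pushes its own expiry forward and returns HRTIMER_RESTART to be requeued.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
{
	/* only moves the expiry; returning HRTIMER_RESTART requeues the timer */
	hrtimer_forward_now(timer, ns_to_ktime(10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void arm_my_timer(struct hrtimer *timer)
{
	/* void return: arming can no longer report a status */
	hrtimer_start(timer, ns_to_ktime(10 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
}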
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 950ae45018260224c9138b043f9071ce4d1f9acc..be7e75c945e97b07d5f248ded5c1e0d9240756ad 100644 (file)
@@ -413,7 +413,8 @@ enum
        BLOCK_IOPOLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-       HRTIMER_SOFTIRQ,
+       HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+                           numbering. Sigh! */
        RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
        NR_SOFTIRQS
@@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                     clockid_t which_clock, enum hrtimer_mode mode);
 
 static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                         const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+                          const enum hrtimer_mode mode)
 {
-       return hrtimer_start(&ttimer->timer, time, mode);
+       hrtimer_start(&ttimer->timer, time, mode);
 }
 
 static inline
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c367cbdf73ab1a5b83f1af48c848be21b466167d..5e75af6cf1bc1d8e74f2074817b0293334bf8174 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <asm/param.h>                 /* for HZ */
+#include <generated/timeconst.h>
 
 /*
  * The following defines establish the engineering parameters of the PLL
@@ -288,7 +289,80 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
        return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
-extern unsigned long msecs_to_jiffies(const unsigned int m);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ, divide with the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+               return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply with the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+               if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+                       return MAX_JIFFY_OFFSET;
+               return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that if
+ * we are doing a net multiplication, that we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+               if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+                       return MAX_JIFFY_OFFSET;
+
+               return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
+                       >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows.
+ *   for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+       if (__builtin_constant_p(m)) {
+               if ((int)m < 0)
+                       return MAX_JIFFY_OFFSET;
+               return _msecs_to_jiffies(m);
+       } else {
+               return __msecs_to_jiffies(m);
+       }
+}
+
 extern unsigned long usecs_to_jiffies(const unsigned int u);
 extern unsigned long timespec_to_jiffies(const struct timespec *value);
 extern void jiffies_to_timespec(const unsigned long jiffies,
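The msecs_to_jiffies() rework above folds constant arguments at compile time and, in the generic case, replaces the division by a multiply-and-shift using the MSEC_TO_HZ_MUL32/ADJ32/SHR32 constants emitted by timeconst.bc. Below is a standalone, userspace-only illustration of that reciprocal trick; the constants are hand-derived here for HZ=100 and ignore the upward rounding the kernel helpers apply, so they are not the kernel's generated values.

#include <stdint.h>
#include <stdio.h>

#define EX_SHR	32
/* fixed-point reciprocal of (1000 / HZ) == 10, i.e. roughly 2^32 / 10 */
#define EX_MUL	((uint64_t)(((1ULL << EX_SHR) + 9) / 10))

static unsigned long ex_msecs_to_ticks(unsigned int m)
{
	/* one 64-bit multiply and a shift, no runtime division */
	return (unsigned long)((EX_MUL * m) >> EX_SHR);
}

int main(void)
{
	printf("1500 ms -> %lu ticks at HZ=100\n", ex_msecs_to_ticks(1500));
	return 0;
}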
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61992cf2e9771699ee06595c8fbb1bd39633018a..cf3342a8ad807c7583d0b2ea1a72bc34735f3e5b 100644 (file)
@@ -566,8 +566,12 @@ struct perf_cpu_context {
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
+
+       raw_spinlock_t                  hrtimer_lock;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
+       unsigned int                    hrtimer_active;
+
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 573a5afd5ed884d5bdcfc4af6cf88c3b9d25214d..0627a447c589dedd3c0c82aa6ca8d39c86bad037 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/ktime.h>
+
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -1154,9 +1156,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
-       *delta_jiffies = ULONG_MAX;
+       *nextevt = KTIME_MAX;
        return 0;
 }
 #endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d2e583a6aacacf09ee9dc3bf3646b6a3cff3494e..db2e31beaae7c5f179972ec1a49051c240a90329 100644 (file)
@@ -32,7 +32,7 @@
 
 void rcu_note_context_switch(void);
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies);
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 void rcu_cpu_stall_reset(void);
 
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index fb86963859c772846dfc531fc9cc8c0825f36ac7..6f8276ae579ca2e22e5776f5efaf28ba02957765 100644 (file)
@@ -49,6 +49,7 @@ struct tk_read_base {
  * @offs_boot:         Offset clock monotonic -> clock boottime
  * @offs_tai:          Offset clock monotonic -> clock tai
  * @tai_offset:                The current UTC to TAI offset in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
  * @raw_time:          Monotonic raw base time in timespec64 format
  * @cycle_interval:    Number of clock cycles in one NTP interval
  * @xtime_interval:    Number of clock shifted nano seconds in one NTP
@@ -85,6 +86,7 @@ struct timekeeper {
        ktime_t                 offs_boot;
        ktime_t                 offs_tai;
        s32                     tai_offset;
+       unsigned int            clock_was_set_seq;
        struct timespec64       raw_time;
 
        /* The following members are for timekeeping internal use */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 8c5a197e1587de4c647ff205b5b591c31a0dbcc6..fbb80e0030bfa0cd63ef892ef5cffeca51f934dd 100644 (file)
@@ -187,13 +187,6 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
  */
 #define NEXT_TIMER_MAX_DELTA   ((1UL << 30) - 1)
 
-/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
 /*
  * Timer-statistics info:
  */
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index a520fd70a59f371f40a34f79e883dcf7b32c23f7..7eec17ad7fa195bba39c06e44817ef3f3a1b0402 100644 (file)
@@ -16,10 +16,10 @@ struct timerqueue_head {
 };
 
 
-extern void timerqueue_add(struct timerqueue_head *head,
-                               struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
-                               struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+                          struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+                          struct timerqueue_node *node);
 extern struct timerqueue_node *timerqueue_iterate_next(
                                                struct timerqueue_node *node);
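A sketch of what the new bool return of timerqueue_add() is for (caller names below are illustrative, not from the patch): the function now reports whether the inserted node became the new head of the queue, i.e. the earliest expiring entry, so the caller knows when the next-expiry event may have to be reprogrammed.

#include <linux/timerqueue.h>

static bool my_enqueue(struct timerqueue_head *head,
		       struct timerqueue_node *node)
{
	if (timerqueue_add(head, node)) {
		/* "node" is now the leftmost entry: reprogram the next event */
		return true;
	}
	return false;
}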
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1a3bf48743ce1c62c26077d642084cbdc8b40d6b..1c6c2826af1ee0bfa6e8d1c9ba1284fb065550c3 100644 (file)
 
 static struct workqueue_struct *perf_wq;
 
+typedef int (*remote_function_f)(void *);
+
 struct remote_function_call {
        struct task_struct      *p;
-       int                     (*func)(void *info);
+       remote_function_f       func;
        void                    *info;
        int                     ret;
 };
@@ -86,7 +88,7 @@ static void remote_function(void *data)
  *         -EAGAIN - when the process moved away
  */
 static int
-task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
+task_function_call(struct task_struct *p, remote_function_f func, void *info)
 {
        struct remote_function_call data = {
                .p      = p,
@@ -110,7 +112,7 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  *
  * returns: @func return value or -ENXIO when the cpu is offline
  */
-static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
+static int cpu_function_call(int cpu, remote_function_f func, void *info)
 {
        struct remote_function_call data = {
                .p      = NULL,
@@ -747,62 +749,31 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 /*
  * function must be called with interrupts disbled
  */
-static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
+static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
        struct perf_cpu_context *cpuctx;
-       enum hrtimer_restart ret = HRTIMER_NORESTART;
        int rotations = 0;
 
        WARN_ON(!irqs_disabled());
 
        cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
-
        rotations = perf_rotate_context(cpuctx);
 
-       /*
-        * arm timer if needed
-        */
-       if (rotations) {
+       raw_spin_lock(&cpuctx->hrtimer_lock);
+       if (rotations)
                hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
-               ret = HRTIMER_RESTART;
-       }
-
-       return ret;
-}
-
-/* CPU is going down */
-void perf_cpu_hrtimer_cancel(int cpu)
-{
-       struct perf_cpu_context *cpuctx;
-       struct pmu *pmu;
-       unsigned long flags;
-
-       if (WARN_ON(cpu != smp_processor_id()))
-               return;
-
-       local_irq_save(flags);
-
-       rcu_read_lock();
-
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-               if (pmu->task_ctx_nr == perf_sw_context)
-                       continue;
-
-               hrtimer_cancel(&cpuctx->hrtimer);
-       }
-
-       rcu_read_unlock();
+       else
+               cpuctx->hrtimer_active = 0;
+       raw_spin_unlock(&cpuctx->hrtimer_lock);
 
-       local_irq_restore(flags);
+       return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
 }
 
-static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 {
-       struct hrtimer *hr = &cpuctx->hrtimer;
+       struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
-       int timer;
+       u64 interval;
 
        /* no multiplexing needed for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
@@ -812,31 +783,36 @@ static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
         * check default is sane, if not set then force to
         * default interval (1/tick)
         */
-       timer = pmu->hrtimer_interval_ms;
-       if (timer < 1)
-               timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
+       interval = pmu->hrtimer_interval_ms;
+       if (interval < 1)
+               interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
 
-       cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+       cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
 
-       hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-       hr->function = perf_cpu_hrtimer_handler;
+       raw_spin_lock_init(&cpuctx->hrtimer_lock);
+       hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+       timer->function = perf_mux_hrtimer_handler;
 }
 
-static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
+static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
 {
-       struct hrtimer *hr = &cpuctx->hrtimer;
+       struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
+       unsigned long flags;
 
        /* not for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
-               return;
+               return 0;
 
-       if (hrtimer_active(hr))
-               return;
+       raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+       if (!cpuctx->hrtimer_active) {
+               cpuctx->hrtimer_active = 1;
+               hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
+               hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+       }
+       raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
 
-       if (!hrtimer_callback_running(hr))
-               __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
-                                        0, HRTIMER_MODE_REL_PINNED, 0);
+       return 0;
 }
 
 void perf_pmu_disable(struct pmu *pmu)
@@ -1935,7 +1911,7 @@ group_sched_in(struct perf_event *group_event,
 
        if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
-               perf_cpu_hrtimer_restart(cpuctx);
+               perf_mux_hrtimer_restart(cpuctx);
                return -EAGAIN;
        }
 
@@ -1982,7 +1958,7 @@ group_sched_in(struct perf_event *group_event,
 
        pmu->cancel_txn(pmu);
 
-       perf_cpu_hrtimer_restart(cpuctx);
+       perf_mux_hrtimer_restart(cpuctx);
 
        return -EAGAIN;
 }
@@ -2255,7 +2231,7 @@ static int __perf_event_enable(void *info)
                 */
                if (leader != event) {
                        group_sched_out(leader, cpuctx, ctx);
-                       perf_cpu_hrtimer_restart(cpuctx);
+                       perf_mux_hrtimer_restart(cpuctx);
                }
                if (leader->attr.pinned) {
                        update_group_times(leader);
@@ -6863,9 +6839,8 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
        } else {
                period = max_t(u64, 10000, hwc->sample_period);
        }
-       __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
@@ -7166,6 +7141,8 @@ perf_event_mux_interval_ms_show(struct device *dev,
        return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
 }
 
+static DEFINE_MUTEX(mux_interval_mutex);
+
 static ssize_t
 perf_event_mux_interval_ms_store(struct device *dev,
                                 struct device_attribute *attr,
@@ -7185,17 +7162,21 @@ perf_event_mux_interval_ms_store(struct device *dev,
        if (timer == pmu->hrtimer_interval_ms)
                return count;
 
+       mutex_lock(&mux_interval_mutex);
        pmu->hrtimer_interval_ms = timer;
 
        /* update all cpuctx for this PMU */
-       for_each_possible_cpu(cpu) {
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
-               if (hrtimer_active(&cpuctx->hrtimer))
-                       hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
+               cpu_function_call(cpu,
+                       (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
        }
+       put_online_cpus();
+       mutex_unlock(&mux_interval_mutex);
 
        return count;
 }
@@ -7300,7 +7281,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
                cpuctx->ctx.pmu = pmu;
 
-               __perf_cpu_hrtimer_init(cpuctx, cpu);
+               __perf_mux_hrtimer_init(cpuctx, cpu);
 
                cpuctx->unique_pmu = pmu;
        }
diff --git a/kernel/futex.c b/kernel/futex.c
index 2579e407ff67d039106207f78a466f824e515db6..720eacff6b581d3d16638a86aaa42dcc0be5dce7 100644 (file)
@@ -2063,11 +2063,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
        queue_me(q, hb);
 
        /* Arm the timer */
-       if (timeout) {
+       if (timeout)
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-               if (!hrtimer_active(&timeout->timer))
-                       timeout->task = NULL;
-       }
 
        /*
         * If we have been removed from the hash list, then another task
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b025295f49662469d1f3b4257f3835d2f40f01e1..8b678cac7fbe389553272a417a3d82c2ddb39406 100644 (file)
@@ -1182,11 +1182,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        set_current_state(state);
 
        /* Setup the timer, when timeout != NULL */
-       if (unlikely(timeout)) {
+       if (unlikely(timeout))
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-               if (!hrtimer_active(&timeout->timer))
-                       timeout->task = NULL;
-       }
 
        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 8c0ec0f5a02702f1a3c5ed5db0bdf346ac7ae140..0ef80a0bbabbc6736533b49e1daf68691794e206 100644 (file)
@@ -1368,9 +1368,9 @@ static void rcu_prepare_kthreads(int cpu)
  * any flavor of RCU.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
-       *delta_jiffies = ULONG_MAX;
+       *nextevt = KTIME_MAX;
        return rcu_cpu_has_callbacks(NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -1481,16 +1481,17 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
  * The caller must have disabled interrupts.
  */
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *dj)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       unsigned long dj;
 
        /* Snapshot to detect later posting of non-lazy callback. */
        rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
        /* If no callbacks, RCU doesn't need the CPU. */
        if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
-               *dj = ULONG_MAX;
+               *nextevt = KTIME_MAX;
                return 0;
        }
 
@@ -1504,11 +1505,12 @@ int rcu_needs_cpu(unsigned long *dj)
 
        /* Request timer delay depending on laziness, and round. */
        if (!rdtp->all_lazy) {
-               *dj = round_up(rcu_idle_gp_delay + jiffies,
+               dj = round_up(rcu_idle_gp_delay + jiffies,
                               rcu_idle_gp_delay) - jiffies;
        } else {
-               *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
+               dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
        }
+       *nextevt = basemono + dj * TICK_NSEC;
        return 0;
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 57bd333bc4ab3e070356e7a3b9b9b2a5e742c91f..ecb7c4216350cf00f3be979246fc44b8a14a0c23 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
-{
-       unsigned long delta;
-       ktime_t soft, hard, now;
-
-       for (;;) {
-               if (hrtimer_active(period_timer))
-                       break;
-
-               now = hrtimer_cb_get_time(period_timer);
-               hrtimer_forward(period_timer, now, period);
-
-               soft = hrtimer_get_softexpires(period_timer);
-               hard = hrtimer_get_expires(period_timer);
-               delta = ktime_to_ns(ktime_sub(hard, soft));
-               __hrtimer_start_range_ns(period_timer, soft, delta,
-                                        HRTIMER_MODE_ABS_PINNED, 0);
-       }
-}
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -355,12 +335,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = hrtimer_get_softexpires(timer);
 
-       return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
@@ -440,8 +419,8 @@ void hrtick_start(struct rq *rq, u64 delay)
         * doesn't make sense. Rely on vruntime for fairness.
         */
        delay = max_t(u64, delay, 10000LL);
-       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
@@ -8108,10 +8087,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
-       if (runtime_enabled && cfs_b->timer_active) {
-               /* force a reprogram */
-               __start_cfs_bandwidth(cfs_b, true);
-       }
+       if (runtime_enabled)
+               start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
 
        for_each_online_cpu(i) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5e95145088fd37b3d07ccac66c3cd58f7effe10a..21d6907d2b9fd07c47d9e9c9125d6d5b47f1b499 100644 (file)
@@ -503,8 +503,6 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        ktime_t now, act;
-       ktime_t soft, hard;
-       unsigned long range;
        s64 delta;
 
        if (boosted)
@@ -527,15 +525,9 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
        if (ktime_us_delta(act, now) < 0)
                return 0;
 
-       hrtimer_set_expires(&dl_se->dl_timer, act);
+       hrtimer_start(&dl_se->dl_timer, act, HRTIMER_MODE_ABS);
 
-       soft = hrtimer_get_softexpires(&dl_se->dl_timer);
-       hard = hrtimer_get_expires(&dl_se->dl_timer);
-       range = ktime_to_ns(ktime_sub(hard, soft));
-       __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
-                                range, HRTIMER_MODE_ABS, 0);
-
-       return hrtimer_active(&dl_se->dl_timer);
+       return 1;
 }
 
 /*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a245c1fc6f0a610f17e2d13635306d681e2ef821..f94724eda407ecc656217c4b6ce0ebeb656f8e8b 100644 (file)
@@ -230,8 +230,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
-       SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
-                       cfs_rq->tg->cfs_bandwidth.timer_active);
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ffeaa4105e48a36105ecaea8967082e1e7a7af98..69be2825262d5df3e4d859e3faabefe3064863e2 100644 (file)
@@ -3476,16 +3476,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        if (cfs_b->quota == RUNTIME_INF)
                amount = min_amount;
        else {
-               /*
-                * If the bandwidth pool has become inactive, then at least one
-                * period must have elapsed since the last consumption.
-                * Refresh the global state and ensure bandwidth timer becomes
-                * active.
-                */
-               if (!cfs_b->timer_active) {
-                       __refill_cfs_bandwidth_runtime(cfs_b);
-                       __start_cfs_bandwidth(cfs_b, false);
-               }
+               start_cfs_bandwidth(cfs_b);
 
                if (cfs_b->runtime > 0) {
                        amount = min(cfs_b->runtime, min_amount);
@@ -3634,6 +3625,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        long task_delta, dequeue = 1;
+       bool empty;
 
        se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
 
@@ -3663,13 +3655,21 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
+       empty = list_empty(&cfs_rq->throttled_list);
+
        /*
         * Add to the _head_ of the list, so that an already-started
         * distribute_cfs_runtime will not see us
         */
        list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
-       if (!cfs_b->timer_active)
-               __start_cfs_bandwidth(cfs_b, false);
+
+       /*
+        * If we're the first throttled task, make sure the bandwidth
+        * timer is running.
+        */
+       if (empty)
+               start_cfs_bandwidth(cfs_b);
+
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3784,13 +3784,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        if (cfs_b->idle && !throttled)
                goto out_deactivate;
 
-       /*
-        * if we have relooped after returning idle once, we need to update our
-        * status as actually running, so that other cpus doing
-        * __start_cfs_bandwidth will stop trying to cancel us.
-        */
-       cfs_b->timer_active = 1;
-
        __refill_cfs_bandwidth_runtime(cfs_b);
 
        if (!throttled) {
@@ -3835,7 +3828,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        return 0;
 
 out_deactivate:
-       cfs_b->timer_active = 0;
        return 1;
 }
 
@@ -3850,7 +3842,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  * Are we near the end of the current quota period?
  *
  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * hrtimer base being cleared by hrtimer_start. In the case of
  * migrate_hrtimers, base is never cleared, so we are fine.
  */
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
@@ -3878,8 +3870,9 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
        if (runtime_refresh_within(cfs_b, min_left))
                return;
 
-       start_bandwidth_timer(&cfs_b->slack_timer,
-                               ns_to_ktime(cfs_bandwidth_slack_period));
+       hrtimer_start(&cfs_b->slack_timer,
+                       ns_to_ktime(cfs_bandwidth_slack_period),
+                       HRTIMER_MODE_REL);
 }
 
 /* we know any runtime found here is valid as update_curr() precedes return */
@@ -3999,6 +3992,7 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, slack_timer);
+
        do_sched_cfs_slack_timer(cfs_b);
 
        return HRTIMER_NORESTART;
@@ -4008,20 +4002,19 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, period_timer);
-       ktime_t now;
        int overrun;
        int idle = 0;
 
        raw_spin_lock(&cfs_b->lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, cfs_b->period);
-
+               overrun = hrtimer_forward_now(timer, cfs_b->period);
                if (!overrun)
                        break;
 
                idle = do_sched_cfs_period_timer(cfs_b, overrun);
        }
+       if (idle)
+               cfs_b->period_active = 0;
        raw_spin_unlock(&cfs_b->lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
@@ -4035,7 +4028,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        cfs_b->period = ns_to_ktime(default_cfs_period());
 
        INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
-       hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        cfs_b->period_timer.function = sched_cfs_period_timer;
        hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cfs_b->slack_timer.function = sched_cfs_slack_timer;
@@ -4047,28 +4040,15 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        INIT_LIST_HEAD(&cfs_rq->throttled_list);
 }
 
-/* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
+void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-       /*
-        * The timer may be active because we're trying to set a new bandwidth
-        * period or because we're racing with the tear-down path
-        * (timer_active==0 becomes visible before the hrtimer call-back
-        * terminates).  In either case we ensure that it's re-programmed
-        */
-       while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
-              hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
-               /* bounce the lock to allow do_sched_cfs_period_timer to run */
-               raw_spin_unlock(&cfs_b->lock);
-               cpu_relax();
-               raw_spin_lock(&cfs_b->lock);
-               /* if someone else restarted the timer then we're done */
-               if (!force && cfs_b->timer_active)
-                       return;
-       }
+       lockdep_assert_held(&cfs_b->lock);
 
-       cfs_b->timer_active = 1;
-       start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
+       if (!cfs_b->period_active) {
+               cfs_b->period_active = 1;
+               hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+               hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
+       }
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 575da76a3874a8c1b2ddd0f518e5ecea7a805262..e43da5391dcdd785ed39d335f3d8056889924d87 100644 (file)
@@ -18,19 +18,22 @@ static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
 {
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
-       ktime_t now;
-       int overrun;
        int idle = 0;
+       int overrun;
 
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
+               overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;
 
+               raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
+               raw_spin_lock(&rt_b->rt_runtime_lock);
        }
+       if (idle)
+               rt_b->rt_period_active = 0;
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -52,11 +55,12 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
 
-       if (hrtimer_active(&rt_b->rt_period_timer))
-               return;
-
        raw_spin_lock(&rt_b->rt_runtime_lock);
-       start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
+       if (!rt_b->rt_period_active) {
+               rt_b->rt_period_active = 1;
+               hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
+               hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
+       }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e1299939588ac47f08b13b45f1a6e2e9cf4d7f..f9a58ef373b4b2240f521882d0b0d29da71d58b7 100644 (file)
@@ -131,6 +131,7 @@ struct rt_bandwidth {
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
+       unsigned int            rt_period_active;
 };
 
 void __dl_clear_params(struct task_struct *p);
@@ -215,7 +216,7 @@ struct cfs_bandwidth {
        s64 hierarchical_quota;
        u64 runtime_expires;
 
-       int idle, timer_active;
+       int idle, period_active;
        struct hrtimer period_timer, slack_timer;
        struct list_head throttled_cfs_rq;
 
@@ -306,7 +307,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
+extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);
@@ -1406,8 +1407,6 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
-
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 01f0312419b3cb44d8fa455d8cfaa2ad14d5ef0d..ffc4cc3dcd47b4e277df56735e11d51abdd87f6d 100644 (file)
@@ -13,19 +13,4 @@ obj-$(CONFIG_TIMER_STATS)                    += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE  $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-       $(call if_changed,hzfile)
-
-quiet_cmd_bc  = BC      $@
-      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-       $(call if_changed,bc)
-
+$(obj)/time.o: $(objtree)/include/config/
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 1b001ed1edb945cd5a95238fc563f93e8971041e..7fbba635a5499805c316c36e99910f1d96eb6fb6 100644 (file)
@@ -317,19 +317,16 @@ EXPORT_SYMBOL_GPL(alarm_init);
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
  */
-int alarm_start(struct alarm *alarm, ktime_t start)
+void alarm_start(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
        unsigned long flags;
-       int ret;
 
        spin_lock_irqsave(&base->lock, flags);
        alarm->node.expires = start;
        alarmtimer_enqueue(base, alarm);
-       ret = hrtimer_start(&alarm->timer, alarm->node.expires,
-                               HRTIMER_MODE_ABS);
+       hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
        spin_unlock_irqrestore(&base->lock, flags);
-       return ret;
 }
 EXPORT_SYMBOL_GPL(alarm_start);
 
@@ -338,12 +335,12 @@ EXPORT_SYMBOL_GPL(alarm_start);
  * @alarm: ptr to alarm to set
  * @start: time relative to now to run the alarm
  */
-int alarm_start_relative(struct alarm *alarm, ktime_t start)
+void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
 
        start = ktime_add(start, base->gettime());
-       return alarm_start(alarm, start);
+       alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
 
@@ -495,12 +492,12 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
  */
 static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
-       clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
-
        if (!alarmtimer_get_rtcdev())
                return -EINVAL;
 
-       return hrtimer_get_res(baseid, tp);
+       tp->tv_sec = 0;
+       tp->tv_nsec = hrtimer_resolution;
+       return 0;
 }
 
 /**
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 76d4bd962b19b3bab345460676954ef6f7c14568..4adf320678627cde1cb9bd75b3dbaddaf3d4d50f 100644 (file)
@@ -66,7 +66,6 @@
  */
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
-
        .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
@@ -74,25 +73,21 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
                        .index = HRTIMER_BASE_MONOTONIC,
                        .clockid = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_REALTIME,
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_BOOTTIME,
                        .clockid = CLOCK_BOOTTIME,
                        .get_time = &ktime_get_boottime,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_TAI,
                        .clockid = CLOCK_TAI,
                        .get_time = &ktime_get_clocktai,
-                       .resolution = KTIME_LOW_RES,
                },
        }
 };
@@ -109,27 +104,6 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
        return hrtimer_clock_to_base_table[clock_id];
 }
 
-
-/*
- * Get the coarse grained time at the softirq based on xtime and
- * wall_to_monotonic.
- */
-static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
-{
-       ktime_t xtim, mono, boot, tai;
-       ktime_t off_real, off_boot, off_tai;
-
-       mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
-       boot = ktime_add(mono, off_boot);
-       xtim = ktime_add(mono, off_real);
-       tai = ktime_add(mono, off_tai);
-
-       base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
-       base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
-       base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-       base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
-}
-
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
@@ -441,24 +415,35 @@ static inline void debug_deactivate(struct hrtimer *timer)
 }
 
 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+                                            struct hrtimer *timer)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+       cpu_base->next_timer = timer;
+#endif
+}
+
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
-       int i;
+       unsigned int active = cpu_base->active_bases;
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+       hrtimer_update_next_timer(cpu_base, NULL);
+       for (; active; base++, active >>= 1) {
                struct timerqueue_node *next;
                struct hrtimer *timer;
 
-               next = timerqueue_getnext(&base->active);
-               if (!next)
+               if (!(active & 0x01))
                        continue;
 
+               next = timerqueue_getnext(&base->active);
                timer = container_of(next, struct hrtimer, node);
                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-               if (expires.tv64 < expires_next.tv64)
+               if (expires.tv64 < expires_next.tv64) {
                        expires_next = expires;
+                       hrtimer_update_next_timer(cpu_base, timer);
+               }
        }
        /*
         * clock_was_set() might have changed base->offset of any of
@@ -471,6 +456,16 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 }
 #endif
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
+
+       return ktime_get_update_offsets_now(&base->clock_was_set_seq,
+                                           offs_real, offs_boot, offs_tai);
+}
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -478,6 +473,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
  * High resolution timer enabled ?
  */
 static int hrtimer_hres_enabled __read_mostly  = 1;
+unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+EXPORT_SYMBOL_GPL(hrtimer_resolution);
 
 /*
  * Enable / Disable high resolution mode
@@ -506,9 +503,14 @@ static inline int hrtimer_is_hres_enabled(void)
 /*
  * Is the high resolution mode active ?
  */
+static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+{
+       return cpu_base->hres_active;
+}
+
 static inline int hrtimer_hres_active(void)
 {
-       return __this_cpu_read(hrtimer_bases.hres_active);
+       return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 }
 
 /*
@@ -519,7 +521,12 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-       ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
+       ktime_t expires_next;
+
+       if (!cpu_base->hres_active)
+               return;
+
+       expires_next = __hrtimer_get_next_event(cpu_base);
 
        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
                return;
@@ -548,58 +555,49 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 }
 
 /*
- * Shared reprogramming for clock_realtime and clock_monotonic
- *
  * When a timer is enqueued and expires earlier than the already enqueued
  * timers, we have to check, whether it expires earlier than the timer for
  * which the clock event device was armed.
  *
- * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
- * and no expiry check happens. The timer gets enqueued into the rbtree. The
- * reprogramming and expiry check is done in the hrtimer_interrupt or in the
- * softirq.
- *
  * Called with interrupts disabled and base->cpu_base.lock held
  */
-static int hrtimer_reprogram(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base)
+static void hrtimer_reprogram(struct hrtimer *timer,
+                             struct hrtimer_clock_base *base)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-       int res;
 
        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
        /*
-        * When the callback is running, we do not reprogram the clock event
-        * device. The timer callback is either running on a different CPU or
-        * the callback is executed in the hrtimer_interrupt context. The
-        * reprogramming is handled either by the softirq, which called the
-        * callback or at the end of the hrtimer_interrupt.
+        * If the timer is not on the current cpu, we cannot reprogram
+        * the other cpu's clock event device.
         */
-       if (hrtimer_callback_running(timer))
-               return 0;
+       if (base->cpu_base != cpu_base)
+               return;
+
+       /*
+        * If the hrtimer interrupt is running, then it will
+        * reevaluate the clock bases and reprogram the clock event
+        * device. The callbacks are always executed in hard interrupt
+        * context so we don't need an extra check for a running
+        * callback.
+        */
+       if (cpu_base->in_hrtirq)
+               return;
 
        /*
         * CLOCK_REALTIME timer might be requested with an absolute
-        * expiry time which is less than base->offset. Nothing wrong
-        * about that, just avoid to call into the tick code, which
-        * has now objections against negative expiry values.
+        * expiry time which is less than base->offset. Set it to 0.
         */
        if (expires.tv64 < 0)
-               return -ETIME;
+               expires.tv64 = 0;
 
        if (expires.tv64 >= cpu_base->expires_next.tv64)
-               return 0;
+               return;
 
-       /*
-        * When the target cpu of the timer is currently executing
-        * hrtimer_interrupt(), then we do not touch the clock event
-        * device. hrtimer_interrupt() will reevaluate all clock bases
-        * before reprogramming the device.
-        */
-       if (cpu_base->in_hrtirq)
-               return 0;
+       /* Update the pointer to the next expiring timer */
+       cpu_base->next_timer = timer;
 
        /*
         * If a hang was detected in the last timer interrupt then we
@@ -608,15 +606,14 @@ static int hrtimer_reprogram(struct hrtimer *timer,
         * to make progress.
         */
        if (cpu_base->hang_detected)
-               return 0;
+               return;
 
        /*
-        * Clockevents returns -ETIME, when the event was in the past.
+        * Program the timer hardware. We enforce the expiry for
+        * events which are already in the past.
         */
-       res = tick_program_event(expires, 0);
-       if (!IS_ERR_VALUE(res))
-               cpu_base->expires_next = expires;
-       return res;
+       cpu_base->expires_next = expires;
+       tick_program_event(expires, 1);
 }
 
 /*
@@ -628,15 +625,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
        base->hres_active = 0;
 }
 
-static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-{
-       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
-       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
-
-       return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
-}
-
 /*
  * Retrigger next event is called after clock was set
  *
@@ -646,7 +634,7 @@ static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
-       if (!hrtimer_hres_active())
+       if (!base->hres_active)
                return;
 
        raw_spin_lock(&base->lock);
@@ -660,29 +648,19 @@ static void retrigger_next_event(void *arg)
  */
 static int hrtimer_switch_to_hres(void)
 {
-       int i, cpu = smp_processor_id();
-       struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
-       unsigned long flags;
-
-       if (base->hres_active)
-               return 1;
-
-       local_irq_save(flags);
+       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
        if (tick_init_highres()) {
-               local_irq_restore(flags);
                printk(KERN_WARNING "Could not switch to high resolution "
-                                   "mode on CPU %d\n", cpu);
+                                   "mode on CPU %d\n", base->cpu);
                return 0;
        }
        base->hres_active = 1;
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-               base->clock_base[i].resolution = KTIME_HIGH_RES;
+       hrtimer_resolution = HIGH_RES_NSEC;
 
        tick_setup_sched_timer();
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
-       local_irq_restore(flags);
        return 1;
 }
 
@@ -704,6 +682,7 @@ void clock_was_set_delayed(void)
 
 #else
 
+static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
@@ -801,6 +780,14 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  *
  * Forward the timer expiry so it will expire in the future.
  * Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
  */
 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 {
@@ -812,8 +799,11 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
        if (delta.tv64 < 0)
                return 0;
 
-       if (interval.tv64 < timer->base->resolution.tv64)
-               interval.tv64 = timer->base->resolution.tv64;
+       if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
+               return 0;
+
+       if (interval.tv64 < hrtimer_resolution)
+               interval.tv64 = hrtimer_resolution;
 
        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);
@@ -847,7 +837,6 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 {
        debug_activate(timer);
 
-       timerqueue_add(&base->active, &timer->node);
        base->cpu_base->active_bases |= 1 << base->index;
 
        /*
@@ -856,7 +845,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;
 
-       return (&timer->node == base->active.next);
+       return timerqueue_add(&base->active, &timer->node);
 }
 
 /*
@@ -873,29 +862,28 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
-       struct timerqueue_node *next_timer;
-       if (!(timer->state & HRTIMER_STATE_ENQUEUED))
-               goto out;
+       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+       unsigned int state = timer->state;
+
+       timer->state = newstate;
+       if (!(state & HRTIMER_STATE_ENQUEUED))
+               return;
+
+       if (!timerqueue_del(&base->active, &timer->node))
+               cpu_base->active_bases &= ~(1 << base->index);
 
-       next_timer = timerqueue_getnext(&base->active);
-       timerqueue_del(&base->active, &timer->node);
-       if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
-               /* Reprogram the clock event device. if enabled */
-               if (reprogram && hrtimer_hres_active()) {
-                       ktime_t expires;
-
-                       expires = ktime_sub(hrtimer_get_expires(timer),
-                                           base->offset);
-                       if (base->cpu_base->expires_next.tv64 == expires.tv64)
-                               hrtimer_force_reprogram(base->cpu_base, 1);
-               }
+       /*
+        * Note: If reprogram is false we do not update
+        * cpu_base->next_timer. This happens when we remove the first
+        * timer on a remote cpu. No harm as we never dereference
+        * cpu_base->next_timer. So the worst thing that can happen is
+        * a superfluous call to hrtimer_force_reprogram() on the
+        * remote cpu later on if the same timer gets enqueued again.
+        */
+       if (reprogram && timer == cpu_base->next_timer)
+               hrtimer_force_reprogram(cpu_base, 1);
 #endif
-       }
-       if (!timerqueue_getnext(&base->active))
-               base->cpu_base->active_bases &= ~(1 << base->index);
-out:
-       timer->state = newstate;
 }
 
 /*
@@ -931,18 +919,25 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
        return 0;
 }
 
-int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode,
-               int wakeup)
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @delta_ns:  "slack" range for the timer
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ */
+void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+                           unsigned long delta_ns, const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
-       int ret, leftmost;
+       int leftmost;
 
        base = lock_hrtimer_base(timer, &flags);
 
        /* Remove an active timer from the queue: */
-       ret = remove_hrtimer(timer, base);
+       remove_hrtimer(timer, base);
 
        if (mode & HRTIMER_MODE_REL) {
                tim = ktime_add_safe(tim, base->get_time());
@@ -954,7 +949,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                 * timeouts. This will go away with the GTOD framework.
                 */
 #ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, base->resolution);
+               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
 #endif
        }
 
@@ -966,11 +961,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
-
-       if (!leftmost) {
-               unlock_hrtimer_base(timer, &flags);
-               return ret;
-       }
+       if (!leftmost)
+               goto unlock;
 
        if (!hrtimer_is_hres_active(timer)) {
                /*
@@ -978,72 +970,14 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                 * on dynticks target.
                 */
                wake_up_nohz_cpu(new_base->cpu_base->cpu);
-       } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
-                       hrtimer_reprogram(timer, new_base)) {
-               /*
-                * Only allow reprogramming if the new base is on this CPU.
-                * (it might still be on another CPU if the timer was pending)
-                *
-                * XXX send_remote_softirq() ?
-                */
-               if (wakeup) {
-                       /*
-                        * We need to drop cpu_base->lock to avoid a
-                        * lock ordering issue vs. rq->lock.
-                        */
-                       raw_spin_unlock(&new_base->cpu_base->lock);
-                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-                       local_irq_restore(flags);
-                       return ret;
-               } else {
-                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-               }
+       } else {
+               hrtimer_reprogram(timer, new_base);
        }
-
+unlock:
        unlock_hrtimer_base(timer, &flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(__hrtimer_start_range_ns);
-
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @delta_ns:  "slack" range for the timer
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
 
-/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
-}
-EXPORT_SYMBOL_GPL(hrtimer_start);
-
-
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:     hrtimer to stop
@@ -1060,6 +994,15 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
        unsigned long flags;
        int ret = -1;
 
+       /*
+        * Check lockless first. If the timer is not active (neither
+        * enqueued nor running the callback), nothing to do here.  The
+        * base lock does not serialize against a concurrent enqueue,
+        * so we can avoid taking it.
+        */
+       if (!hrtimer_active(timer))
+               return 0;
+
        base = lock_hrtimer_base(timer, &flags);
 
        if (!hrtimer_callback_running(timer))
@@ -1113,26 +1056,22 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 /**
  * hrtimer_get_next_event - get the time until next expiry event
  *
- * Returns the delta to the next expiry event or KTIME_MAX if no timer
- * is pending.
+ * Returns the next expiry time or KTIME_MAX if no timer is pending.
  */
-ktime_t hrtimer_get_next_event(void)
+u64 hrtimer_get_next_event(void)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       ktime_t mindelta = { .tv64 = KTIME_MAX };
+       u64 expires = KTIME_MAX;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-       if (!hrtimer_hres_active())
-               mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
-                                    ktime_get());
+       if (!__hrtimer_hres_active(cpu_base))
+               expires = __hrtimer_get_next_event(cpu_base).tv64;
 
        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
-       if (mindelta.tv64 < 0)
-               mindelta.tv64 = 0;
-       return mindelta;
+       return expires;
 }
 #endif
 
@@ -1174,30 +1113,10 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
 
-/**
- * hrtimer_get_res - get the timer resolution for a clock
- * @which_clock: which clock to query
- * @tp:                 pointer to timespec variable to store the resolution
- *
- * Store the resolution of the clock selected by @which_clock in the
- * variable pointed to by @tp.
- */
-int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
-{
-       struct hrtimer_cpu_base *cpu_base;
-       int base = hrtimer_clockid_to_base(which_clock);
-
-       cpu_base = raw_cpu_ptr(&hrtimer_bases);
-       *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(hrtimer_get_res);
-
-static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+                         struct hrtimer_clock_base *base,
+                         struct hrtimer *timer, ktime_t *now)
 {
-       struct hrtimer_clock_base *base = timer->base;
-       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
 
@@ -1223,55 +1142,32 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
         * we do not reprogramm the event hardware. Happens either in
         * hrtimer_start_range_ns() or in hrtimer_interrupt()
+        *
+        * Note: Because we dropped the cpu_base->lock above,
+        * hrtimer_start_range_ns() can have popped in and enqueued the timer
+        * for us already.
         */
-       if (restart != HRTIMER_NORESTART) {
-               BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+       if (restart != HRTIMER_NORESTART &&
+           !(timer->state & HRTIMER_STATE_ENQUEUED))
                enqueue_hrtimer(timer, base);
-       }
 
        WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
 
        timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer interrupt
- * Called with interrupts disabled
- */
-void hrtimer_interrupt(struct clock_event_device *dev)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
-       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       ktime_t expires_next, now, entry_time, delta;
-       int i, retries = 0;
-
-       BUG_ON(!cpu_base->hres_active);
-       cpu_base->nr_events++;
-       dev->next_event.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
-       entry_time = now = hrtimer_update_base(cpu_base);
-retry:
-       cpu_base->in_hrtirq = 1;
-       /*
-        * We set expires_next to KTIME_MAX here with cpu_base->lock
-        * held to prevent that a timer is enqueued in our queue via
-        * the migration code. This does not affect enqueueing of
-        * timers which run their callback and need to be requeued on
-        * this CPU.
-        */
-       cpu_base->expires_next.tv64 = KTIME_MAX;
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       unsigned int active = cpu_base->active_bases;
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               struct hrtimer_clock_base *base;
+       for (; active; base++, active >>= 1) {
                struct timerqueue_node *node;
                ktime_t basenow;
 
-               if (!(cpu_base->active_bases & (1 << i)))
+               if (!(active & 0x01))
                        continue;
 
-               base = cpu_base->clock_base + i;
                basenow = ktime_add(now, base->offset);
 
                while ((node = timerqueue_getnext(&base->active))) {
@@ -1294,9 +1190,42 @@ void hrtimer_interrupt(struct clock_event_device *dev)
                        if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
                                break;
 
-                       __run_hrtimer(timer, &basenow);
+                       __run_hrtimer(cpu_base, base, timer, &basenow);
                }
        }
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+ */
+void hrtimer_interrupt(struct clock_event_device *dev)
+{
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+       ktime_t expires_next, now, entry_time, delta;
+       int retries = 0;
+
+       BUG_ON(!cpu_base->hres_active);
+       cpu_base->nr_events++;
+       dev->next_event.tv64 = KTIME_MAX;
+
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
+retry:
+       cpu_base->in_hrtirq = 1;
+       /*
+        * We set expires_next to KTIME_MAX here with cpu_base->lock
+        * held to prevent that a timer is enqueued in our queue via
+        * the migration code. This does not affect enqueueing of
+        * timers which run their callback and need to be requeued on
+        * this CPU.
+        */
+       cpu_base->expires_next.tv64 = KTIME_MAX;
+
+       __hrtimer_run_queues(cpu_base, now);
+
        /* Reevaluate the clock bases for the next expiry */
        expires_next = __hrtimer_get_next_event(cpu_base);
        /*
@@ -1342,8 +1271,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        cpu_base->hang_detected = 1;
        raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
-       if (delta.tv64 > cpu_base->max_hang_time.tv64)
-               cpu_base->max_hang_time = delta;
+       if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
+               cpu_base->max_hang_time = (unsigned int) delta.tv64;
        /*
         * Limit it to a sensible value as we enforce a longer
         * delay. Give the CPU at least 100ms to catch up.
@@ -1361,7 +1290,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
  * local version of hrtimer_peek_ahead_timers() called with interrupts
  * disabled.
  */
-static void __hrtimer_peek_ahead_timers(void)
+static inline void __hrtimer_peek_ahead_timers(void)
 {
        struct tick_device *td;
 
@@ -1373,29 +1302,6 @@ static void __hrtimer_peek_ahead_timers(void)
                hrtimer_interrupt(td->evtdev);
 }
 
-/**
- * hrtimer_peek_ahead_timers -- run soft-expired timers now
- *
- * hrtimer_peek_ahead_timers will peek at the timer queue of
- * the current cpu and check if there are any timers for which
- * the soft expires time has passed. If any such timers exist,
- * they are run immediately and then removed from the timer queue.
- *
- */
-void hrtimer_peek_ahead_timers(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __hrtimer_peek_ahead_timers();
-       local_irq_restore(flags);
-}
-
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-       hrtimer_peek_ahead_timers();
-}
-
 #else /* CONFIG_HIGH_RES_TIMERS */
 
 static inline void __hrtimer_peek_ahead_timers(void) { }
@@ -1403,66 +1309,32 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
 #endif /* !CONFIG_HIGH_RES_TIMERS */
 
 /*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
- * For HRT its the fall back code to run the softirq in the timer
- * softirq context in case the hrtimer initialization failed or has
- * not been done yet.
+ * Called from run_local_timers in hardirq context every jiffy
  */
-void hrtimer_run_pending(void)
+void hrtimer_run_queues(void)
 {
-       if (hrtimer_hres_active())
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+       ktime_t now;
+
+       if (__hrtimer_hres_active(cpu_base))
                return;
 
        /*
-        * This _is_ ugly: We have to check in the softirq context,
-        * whether we can switch to highres and / or nohz mode. The
-        * clocksource switch happens in the timer interrupt with
-        * xtime_lock held. Notification from there only sets the
-        * check bit in the tick_oneshot code, otherwise we might
-        * deadlock vs. xtime_lock.
+        * This _is_ ugly: We have to check periodically whether we
+        * can switch to highres and / or nohz mode. The clocksource
+        * switch happens with xtime_lock held. Notification from
+        * there only sets the check bit in the tick_oneshot code,
+        * otherwise we might deadlock vs. xtime_lock.
         */
-       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
+       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
                hrtimer_switch_to_hres();
-}
-
-/*
- * Called from hardirq context every jiffy
- */
-void hrtimer_run_queues(void)
-{
-       struct timerqueue_node *node;
-       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       struct hrtimer_clock_base *base;
-       int index, gettime = 1;
-
-       if (hrtimer_hres_active())
                return;
-
-       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-               base = &cpu_base->clock_base[index];
-               if (!timerqueue_getnext(&base->active))
-                       continue;
-
-               if (gettime) {
-                       hrtimer_get_softirq_time(cpu_base);
-                       gettime = 0;
-               }
-
-               raw_spin_lock(&cpu_base->lock);
-
-               while ((node = timerqueue_getnext(&base->active))) {
-                       struct hrtimer *timer;
-
-                       timer = container_of(node, struct hrtimer, node);
-                       if (base->softirq_time.tv64 <=
-                                       hrtimer_get_expires_tv64(timer))
-                               break;
-
-                       __run_hrtimer(timer, &base->softirq_time);
-               }
-               raw_spin_unlock(&cpu_base->lock);
        }
+
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
+       __hrtimer_run_queues(cpu_base, now);
+       raw_spin_unlock(&cpu_base->lock);
 }
 
 /*
@@ -1495,8 +1367,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start_expires(&t->timer, mode);
-               if (!hrtimer_active(&t->timer))
-                       t->task = NULL;
 
                if (likely(t->task))
                        freezable_schedule();
@@ -1729,9 +1599,6 @@ void __init hrtimers_init(void)
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                          (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**
@@ -1770,8 +1637,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
        hrtimer_init_sleeper(&t, current);
 
        hrtimer_start_expires(&t.timer, mode);
-       if (!hrtimer_active(&t.timer))
-               t.task = NULL;
 
        if (likely(t.task))
                schedule();
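
The net effect of the hrtimer.c changes above is that hrtimer_start() and hrtimer_start_range_ns() no longer report whether the timer was active; callers simply arm the timer, and periodic users requeue themselves via hrtimer_forward()/hrtimer_forward_now() from the callback. A minimal sketch of a user written against this API follows; the demo_* names are made up for illustration and are not part of this patch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;       /* hypothetical example timer */
static ktime_t demo_period;

/* Runs in hard interrupt context; requeue with hrtimer_forward_now(). */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
        /* ... periodic work ... */
        hrtimer_forward_now(t, demo_period);
        return HRTIMER_RESTART;
}

static void demo_timer_setup(void)
{
        demo_period = ktime_set(0, 10 * NSEC_PER_MSEC);        /* 10 ms */
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        /* No return value to check any more; the timer is simply armed. */
        hrtimer_start(&demo_timer, demo_period, HRTIMER_MODE_REL);
}
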
index 31ea01f42e1f088786a291199cc54e9bde4658c9..31d11ac9fa4739789728c44470b82115ec307d11 100644 (file)
@@ -272,13 +272,20 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
        return 0;
 }
 
+static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp)
+{
+       tp->tv_sec = 0;
+       tp->tv_nsec = hrtimer_resolution;
+       return 0;
+}
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
        struct k_clock clock_realtime = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_clock_realtime_get,
                .clock_set      = posix_clock_realtime_set,
                .clock_adj      = posix_clock_realtime_adj,
@@ -290,7 +297,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_ktime_get_ts,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
@@ -300,7 +307,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic_raw = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_monotonic_raw,
        };
        struct k_clock clock_realtime_coarse = {
@@ -312,7 +319,7 @@ static __init int init_posix_timers(void)
                .clock_get      = posix_get_monotonic_coarse,
        };
        struct k_clock clock_tai = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_tai,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
@@ -322,7 +329,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_boottime = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_boottime,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
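
With .clock_getres wired to posix_get_hrtimer_res, these clocks report the single global hrtimer_resolution (HIGH_RES_NSEC once high resolution mode is active, LOW_RES_NSEC otherwise) instead of a per-clock-base value. A throwaway userspace check, purely to illustrate what becomes observable:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res;

        /* CLOCK_MONOTONIC is one of the clocks routed to posix_get_hrtimer_res */
        if (clock_getres(CLOCK_MONOTONIC, &res) == 0)
                printf("resolution: %ld s %ld ns\n",
                       (long)res.tv_sec, res.tv_nsec);
        return 0;
}
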
index 6aac4beedbbe235951c0671336e52b2459a047fb..3e7db49a2381d14506a37c69b45268feeec56bef 100644 (file)
@@ -22,6 +22,7 @@ static void bc_set_mode(enum clock_event_mode mode,
                        struct clock_event_device *bc)
 {
        switch (mode) {
+       case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                /*
                 * Note, we cannot cancel the timer here as we might
@@ -66,9 +67,11 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
         * hrtimer_{start/cancel} functions call into tracing,
         * calls to these functions must be bound within RCU_NONIDLE.
         */
-       RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
-               !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
-                       0);
+       RCU_NONIDLE({
+                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
+                       if (bc_moved)
+                               hrtimer_start(&bctimer, expires,
+                                             HRTIMER_MODE_ABS_PINNED);});
        if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
@@ -99,10 +102,13 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+       switch (ce_broadcast_hrtimer.mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
+                       return HRTIMER_RESTART;
+       default:
                return HRTIMER_NORESTART;
-
-       return HRTIMER_RESTART;
+       }
 }
 
 void tick_setup_hrtimer_broadcast(void)
index 7e8ca4f448a88c5ad5708106bbd889e22715b3ad..12fcc55d607a6877ac1b0c1eead59242061a9d08 100644 (file)
@@ -255,18 +255,18 @@ int tick_receive_broadcast(void)
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
-static void tick_do_broadcast(struct cpumask *mask)
+static bool tick_do_broadcast(struct cpumask *mask)
 {
        int cpu = smp_processor_id();
        struct tick_device *td;
+       bool local = false;
 
        /*
         * Check, if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
-               td = &per_cpu(tick_cpu_device, cpu);
-               td->evtdev->event_handler(td->evtdev);
+               local = true;
        }
 
        if (!cpumask_empty(mask)) {
@@ -279,16 +279,17 @@ static void tick_do_broadcast(struct cpumask *mask)
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
+       return local;
 }
 
 /*
  * Periodic broadcast:
  * - invoke the broadcast handlers
  */
-static void tick_do_periodic_broadcast(void)
+static bool tick_do_periodic_broadcast(void)
 {
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
-       tick_do_broadcast(tmpmask);
+       return tick_do_broadcast(tmpmask);
 }
 
 /*
@@ -296,34 +297,26 @@ static void tick_do_periodic_broadcast(void)
  */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
-       ktime_t next;
+       struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+       bool bc_local;
 
        raw_spin_lock(&tick_broadcast_lock);
+       bc_local = tick_do_periodic_broadcast();
 
-       tick_do_periodic_broadcast();
+       if (dev->state == CLOCK_EVT_STATE_ONESHOT) {
+               ktime_t next = ktime_add(dev->next_event, tick_period);
 
-       /*
-        * The device is in periodic mode. No reprogramming necessary:
-        */
-       if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-               goto unlock;
+               clockevents_program_event(dev, next, true);
+       }
+       raw_spin_unlock(&tick_broadcast_lock);
 
        /*
-        * Setup the next period for devices, which do not have
-        * periodic mode. We read dev->next_event first and add to it
-        * when the event already expired. clockevents_program_event()
-        * sets dev->next_event only when the event is really
-        * programmed to the device.
+        * We run the handler of the local cpu after dropping
+        * tick_broadcast_lock because the handler might deadlock when
+        * trying to switch to oneshot mode.
         */
-       for (next = dev->next_event; ;) {
-               next = ktime_add(next, tick_period);
-
-               if (!clockevents_program_event(dev, next, false))
-                       goto unlock;
-               tick_do_periodic_broadcast();
-       }
-unlock:
-       raw_spin_unlock(&tick_broadcast_lock);
+       if (bc_local)
+               td->evtdev->event_handler(td->evtdev);
 }
 
 /**
@@ -532,18 +525,14 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
        irq_set_affinity(bc->irq, bc->cpumask);
 }
 
-static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
-                                   ktime_t expires, int force)
+static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+                                    ktime_t expires)
 {
-       int ret;
-
        if (bc->state != CLOCK_EVT_STATE_ONESHOT)
                clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
-       ret = clockevents_program_event(bc, expires, force);
-       if (!ret)
-               tick_broadcast_set_affinity(bc, cpumask_of(cpu));
-       return ret;
+       clockevents_program_event(bc, expires, 1);
+       tick_broadcast_set_affinity(bc, cpumask_of(cpu));
 }
 
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
@@ -580,9 +569,9 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;
+       bool bc_local;
 
        raw_spin_lock(&tick_broadcast_lock);
-again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(tmpmask);
@@ -624,7 +613,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
        /*
         * Wakeup the cpus which have an expired event.
         */
-       tick_do_broadcast(tmpmask);
+       bc_local = tick_do_broadcast(tmpmask);
 
        /*
         * Two reasons for reprogram:
@@ -636,15 +625,15 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
-       if (next_event.tv64 != KTIME_MAX) {
-               /*
-                * Rearm the broadcast device. If event expired,
-                * repeat the above
-                */
-               if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
-                       goto again;
-       }
+       if (next_event.tv64 != KTIME_MAX)
+               tick_broadcast_set_event(dev, next_cpu, next_event);
+
        raw_spin_unlock(&tick_broadcast_lock);
+
+       if (bc_local) {
+               td = this_cpu_ptr(&tick_cpu_device);
+               td->evtdev->event_handler(td->evtdev);
+       }
 }
 
 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
@@ -726,7 +715,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
                         */
                        if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
                            dev->next_event.tv64 < bc->next_event.tv64)
-                               tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
+                               tick_broadcast_set_event(bc, cpu, dev->next_event);
                }
                /*
                 * If the current CPU owns the hrtimer broadcast
@@ -861,7 +850,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                        clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask,
                                                       tick_next_period);
-                       tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
+                       tick_broadcast_set_event(bc, cpu, tick_next_period);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
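
Both broadcast handlers above now follow the same shape: decide under tick_broadcast_lock whether the local CPU has an expired event, then run the local event handler only after the lock has been dropped, because the handler may try to switch the device to oneshot mode and deadlock on the lock. A generic sketch of that pattern, with made-up names and deliberately unrelated to the actual tick code:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static bool demo_local_pending;                 /* protected by demo_lock */

static void demo_handle_broadcast(void (*local_handler)(void))
{
        bool run_local;

        raw_spin_lock(&demo_lock);
        run_local = demo_local_pending;         /* decide while holding the lock */
        demo_local_pending = false;
        raw_spin_unlock(&demo_lock);

        if (run_local)                          /* act only after dropping it */
                local_handler();
}
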
index 3ae6afa1eb98e71cc82272cd0a79a25101eff429..ea5f9eae8f741ae40e1313b6c62db69ebdbada9f 100644 (file)
@@ -102,6 +102,16 @@ void tick_handle_periodic(struct clock_event_device *dev)
 
        tick_periodic(cpu);
 
+#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
+       /*
+        * The cpu might have transitioned to HIGHRES or NOHZ mode via
+        * update_process_times() -> run_local_timers() ->
+        * hrtimer_run_queues().
+        */
+       if (dev->event_handler != tick_handle_periodic)
+               return;
+#endif
+
        if (dev->state != CLOCK_EVT_STATE_ONESHOT)
                return;
        for (;;) {
index b64fdd8054c56b042784fdce988ebad64f2ea803..65273f0a11ed34d90ceb03a5061b6a1510e3220d 100644 (file)
@@ -137,3 +137,5 @@ extern void tick_nohz_init(void);
 # else
 static inline void tick_nohz_init(void) { }
 #endif
+
+extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
index 914259128145e2394e65bd36f18aaf9a81f78843..812f7a3b9898368d85952f1e15d732fb588ad6dc 100644 (file)
@@ -565,156 +565,144 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+       hrtimer_cancel(&ts->sched_timer);
+       hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
+
+       /* Forward the time to expire in the future */
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+               hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+       else
+               tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
 {
-       unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
-       ktime_t last_update, expires, ret = { .tv64 = 0 };
-       unsigned long rcu_delta_jiffies;
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
-       u64 time_delta;
-
-       time_delta = timekeeping_max_deferment();
+       u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
+       unsigned long seq, basejiff;
+       ktime_t tick;
 
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&jiffies_lock);
-               last_update = last_jiffies_update;
-               last_jiffies = jiffies;
+               basemono = last_jiffies_update.tv64;
+               basejiff = jiffies;
        } while (read_seqretry(&jiffies_lock, seq));
+       ts->last_jiffies = basejiff;
 
-       if (rcu_needs_cpu(&rcu_delta_jiffies) ||
+       if (rcu_needs_cpu(basemono, &next_rcu) ||
            arch_needs_cpu() || irq_work_needs_cpu()) {
-               next_jiffies = last_jiffies + 1;
-               delta_jiffies = 1;
+               next_tick = basemono + TICK_NSEC;
        } else {
-               /* Get the next timer wheel timer */
-               next_jiffies = get_next_timer_interrupt(last_jiffies);
-               delta_jiffies = next_jiffies - last_jiffies;
-               if (rcu_delta_jiffies < delta_jiffies) {
-                       next_jiffies = last_jiffies + rcu_delta_jiffies;
-                       delta_jiffies = rcu_delta_jiffies;
-               }
+               /*
+                * Get the next pending timer. If high resolution
+                * timers are enabled this only takes the timer wheel
+                * timers into account. If high resolution timers are
+                * disabled this also looks at the next expiring
+                * hrtimer.
+                */
+               next_tmr = get_next_timer_interrupt(basejiff, basemono);
+               ts->next_timer = next_tmr;
+               /* Take the next rcu event into account */
+               next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
        }
 
        /*
-        * Do not stop the tick, if we are only one off (or less)
-        * or if the cpu is required for RCU:
+        * If the tick is due in the next period, keep it ticking or
+        * restart it properly.
         */
-       if (!ts->tick_stopped && delta_jiffies <= 1)
-               goto out;
-
-       /* Schedule the tick, if we are at least one jiffie off */
-       if ((long)delta_jiffies >= 1) {
-
-               /*
-                * If this cpu is the one which updates jiffies, then
-                * give up the assignment and let it be taken by the
-                * cpu which runs the tick timer next, which might be
-                * this cpu as well. If we don't drop this here the
-                * jiffies might be stale and do_timer() never
-                * invoked. Keep track of the fact that it was the one
-                * which had the do_timer() duty last. If this cpu is
-                * the one which had the do_timer() duty last, we
-                * limit the sleep time to the timekeeping
-                * max_deferement value which we retrieved
-                * above. Otherwise we can sleep as long as we want.
-                */
-               if (cpu == tick_do_timer_cpu) {
-                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-                       ts->do_timer_last = 1;
-               } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
-                       time_delta = KTIME_MAX;
-                       ts->do_timer_last = 0;
-               } else if (!ts->do_timer_last) {
-                       time_delta = KTIME_MAX;
+       delta = next_tick - basemono;
+       if (delta <= (u64)TICK_NSEC) {
+               tick.tv64 = 0;
+               if (!ts->tick_stopped)
+                       goto out;
+               if (delta == 0) {
+                       /* Tick is stopped, but required now. Enforce it */
+                       tick_nohz_restart(ts, now);
+                       goto out;
                }
+       }
+
+       /*
+        * If this cpu is the one which updates jiffies, then give up
+        * the assignment and let it be taken by the cpu which runs
+        * the tick timer next, which might be this cpu as well. If we
+        * don't drop this here the jiffies might be stale and
+        * do_timer() never invoked. Keep track of the fact that it
+        * was the one which had the do_timer() duty last. If this cpu
+        * is the one which had the do_timer() duty last, we limit the
+        * sleep time to the timekeeping max_deferment value.
+        * Otherwise we can sleep as long as we want.
+        */
+       delta = timekeeping_max_deferment();
+       if (cpu == tick_do_timer_cpu) {
+               tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+               ts->do_timer_last = 1;
+       } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+               delta = KTIME_MAX;
+               ts->do_timer_last = 0;
+       } else if (!ts->do_timer_last) {
+               delta = KTIME_MAX;
+       }
 
 #ifdef CONFIG_NO_HZ_FULL
-               if (!ts->inidle) {
-                       time_delta = min(time_delta,
-                                        scheduler_tick_max_deferment());
-               }
+       /* Limit the tick delta to the maximum scheduler deferment */
+       if (!ts->inidle)
+               delta = min(delta, scheduler_tick_max_deferment());
 #endif
 
-               /*
-                * calculate the expiry time for the next timer wheel
-                * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
-                * that there is no timer pending or at least extremely
-                * far into the future (12 days for HZ=1000). In this
-                * case we set the expiry to the end of time.
-                */
-               if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
-                       /*
-                        * Calculate the time delta for the next timer event.
-                        * If the time delta exceeds the maximum time delta
-                        * permitted by the current clocksource then adjust
-                        * the time delta accordingly to ensure the
-                        * clocksource does not wrap.
-                        */
-                       time_delta = min_t(u64, time_delta,
-                                          tick_period.tv64 * delta_jiffies);
-               }
-
-               if (time_delta < KTIME_MAX)
-                       expires = ktime_add_ns(last_update, time_delta);
-               else
-                       expires.tv64 = KTIME_MAX;
-
-               /* Skip reprogram of event if its not changed */
-               if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
-                       goto out;
+       /* Calculate the next expiry time */
+       if (delta < (KTIME_MAX - basemono))
+               expires = basemono + delta;
+       else
+               expires = KTIME_MAX;
 
-               ret = expires;
+       expires = min_t(u64, expires, next_tick);
+       tick.tv64 = expires;
 
-               /*
-                * nohz_stop_sched_tick can be called several times before
-                * the nohz_restart_sched_tick is called. This happens when
-                * interrupts arrive which do not cause a reschedule. In the
-                * first call we save the current tick time, so we can restart
-                * the scheduler tick in nohz_restart_sched_tick.
-                */
-               if (!ts->tick_stopped) {
-                       nohz_balance_enter_idle(cpu);
-                       calc_load_enter_idle();
+       /* Skip reprogram of event if it's not changed */
+       if (ts->tick_stopped && (expires == dev->next_event.tv64))
+               goto out;
 
-                       ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
-                       ts->tick_stopped = 1;
-                       trace_tick_stop(1, " ");
-               }
+       /*
+        * nohz_stop_sched_tick can be called several times before
+        * the nohz_restart_sched_tick is called. This happens when
+        * interrupts arrive which do not cause a reschedule. In the
+        * first call we save the current tick time, so we can restart
+        * the scheduler tick in nohz_restart_sched_tick.
+        */
+       if (!ts->tick_stopped) {
+               nohz_balance_enter_idle(cpu);
+               calc_load_enter_idle();
 
-               /*
-                * If the expiration time == KTIME_MAX, then
-                * in this case we simply stop the tick timer.
-                */
-                if (unlikely(expires.tv64 == KTIME_MAX)) {
-                       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-                               hrtimer_cancel(&ts->sched_timer);
-                       goto out;
-               }
+               ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
+               ts->tick_stopped = 1;
+               trace_tick_stop(1, " ");
+       }
 
-               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start(&ts->sched_timer, expires,
-                                     HRTIMER_MODE_ABS_PINNED);
-                       /* Check, if the timer was already in the past */
-                       if (hrtimer_active(&ts->sched_timer))
-                               goto out;
-               } else if (!tick_program_event(expires, 0))
-                               goto out;
-               /*
-                * We are past the event already. So we crossed a
-                * jiffie boundary. Update jiffies and raise the
-                * softirq.
-                */
-               tick_do_update_jiffies64(ktime_get());
+       /*
+        * If the expiration time == KTIME_MAX, then we simply stop
+        * the tick timer.
+        */
+       if (unlikely(expires == KTIME_MAX)) {
+               if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+                       hrtimer_cancel(&ts->sched_timer);
+               goto out;
        }
-       raise_softirq_irqoff(TIMER_SOFTIRQ);
+
+       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+               hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+       else
+               tick_program_event(tick, 1);
 out:
-       ts->next_jiffies = next_jiffies;
-       ts->last_jiffies = last_jiffies;
+       /* Update the estimated sleep length */
        ts->sleep_length = ktime_sub(dev->next_event, now);
-
-       return ret;
+       return tick;
 }
 
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
@@ -876,32 +864,6 @@ ktime_t tick_nohz_get_sleep_length(void)
        return ts->sleep_length;
 }
 
-static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
-{
-       hrtimer_cancel(&ts->sched_timer);
-       hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
-
-       while (1) {
-               /* Forward the time to expire in the future */
-               hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start_expires(&ts->sched_timer,
-                                             HRTIMER_MODE_ABS_PINNED);
-                       /* Check, if the timer was already in the past */
-                       if (hrtimer_active(&ts->sched_timer))
-                               break;
-               } else {
-                       if (!tick_program_event(
-                               hrtimer_get_expires(&ts->sched_timer), 0))
-                               break;
-               }
-               /* Reread time and update jiffies */
-               now = ktime_get();
-               tick_do_update_jiffies64(now);
-       }
-}
-
 static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
        /* Update jiffies first */
@@ -972,12 +934,6 @@ void tick_nohz_idle_exit(void)
        local_irq_enable();
 }
 
-static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
-{
-       hrtimer_forward(&ts->sched_timer, now, tick_period);
-       return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
-}
-
 /*
  * The nohz low res interrupt handler
  */
@@ -996,10 +952,8 @@ static void tick_nohz_handler(struct clock_event_device *dev)
        if (unlikely(ts->tick_stopped))
                return;
 
-       while (tick_nohz_reprogram(ts, now)) {
-               now = ktime_get();
-               tick_do_update_jiffies64(now);
-       }
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
 /**
@@ -1013,11 +967,9 @@ static void tick_nohz_switch_to_nohz(void)
        if (!tick_nohz_enabled)
                return;
 
-       local_irq_disable();
-       if (tick_switch_to_oneshot(tick_nohz_handler)) {
-               local_irq_enable();
+       if (tick_switch_to_oneshot(tick_nohz_handler))
                return;
-       }
+
        tick_nohz_active = 1;
        ts->nohz_mode = NOHZ_MODE_LOWRES;
 
@@ -1029,13 +981,9 @@ static void tick_nohz_switch_to_nohz(void)
        /* Get the next period */
        next = tick_init_jiffy_update();
 
-       for (;;) {
-               hrtimer_set_expires(&ts->sched_timer, next);
-               if (!tick_program_event(next, 0))
-                       break;
-               next = ktime_add(next, tick_period);
-       }
-       local_irq_enable();
+       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       hrtimer_set_expires(&ts->sched_timer, next);
+       tick_program_event(next, 1);
 }
 
 /*
@@ -1167,15 +1115,8 @@ void tick_setup_sched_timer(void)
                hrtimer_add_expires_ns(&ts->sched_timer, offset);
        }
 
-       for (;;) {
-               hrtimer_forward(&ts->sched_timer, now, tick_period);
-               hrtimer_start_expires(&ts->sched_timer,
-                                     HRTIMER_MODE_ABS_PINNED);
-               /* Check, if the timer was already in the past */
-               if (hrtimer_active(&ts->sched_timer))
-                       break;
-               now = ktime_get();
-       }
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
 
 #ifdef CONFIG_NO_HZ_COMMON
        if (tick_nohz_enabled) {
@@ -1227,7 +1168,7 @@ void tick_oneshot_notify(void)
  * Called cyclic from the hrtimer softirq (driven by the timer
  * softirq) allow_nohz signals, that we can switch into low-res nohz
  * mode, because high resolution timers are disabled (either compile
- * or runtime).
+ * or runtime). Called with interrupts disabled.
  */
 int tick_check_oneshot_change(int allow_nohz)
 {
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 28b5da3e1a176e62c081e965fbfc32090d3f1e74..42fdf4958bccd1c5d1593f4ebe578ea99477ddbf 100644 (file)
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -57,7 +57,7 @@ struct tick_sched {
        ktime_t                         iowait_sleeptime;
        ktime_t                         sleep_length;
        unsigned long                   last_jiffies;
-       unsigned long                   next_jiffies;
+       u64                             next_timer;
        ktime_t                         idle_expires;
        int                             do_timer_last;
 };
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 2c85b7724af4b0081a112e1b12cbcce4ef831117..c42c2c3214fe7270168ba394d239d8a84aa985ed 100644 (file)
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -41,7 +41,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-#include "timeconst.h"
+#include <generated/timeconst.h>
 #include "timekeeping.h"
 
 /*
@@ -483,9 +483,11 @@ struct timespec64 ns_to_timespec64(const s64 nsec)
 }
 EXPORT_SYMBOL(ns_to_timespec64);
 #endif
-/*
- * When we convert to jiffies then we interpret incoming values
- * the following way:
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
  *
  * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
  *
@@ -493,51 +495,28 @@ EXPORT_SYMBOL(ns_to_timespec64);
  *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
  *
  * - all other values are converted to jiffies by either multiplying
- *   the input value by a factor or dividing it with a factor
- *
- * We must also be careful about 32-bit overflows.
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows.
+ *   for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the _msecs_to_jiffies helpers are the HZ dependent conversion
+ * routines found in include/linux/jiffies.h
  */
-unsigned long msecs_to_jiffies(const unsigned int m)
+unsigned long __msecs_to_jiffies(const unsigned int m)
 {
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
-
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-       /*
-        * HZ is equal to or smaller than 1000, and 1000 is a nice
-        * round multiple of HZ, divide with the factor between them,
-        * but round upwards:
-        */
-       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-       /*
-        * HZ is larger than 1000, and HZ is a nice round multiple of
-        * 1000 - simply multiply with the factor between them.
-        *
-        * But first make sure the multiplication result cannot
-        * overflow:
-        */
-       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return m * (HZ / MSEC_PER_SEC);
-#else
-       /*
-        * Generic case - multiply, round and divide. But first
-        * check that if we are doing a net multiplication, that
-        * we wouldn't overflow:
-        */
-       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
-               >> MSEC_TO_HZ_SHR32;
-#endif
+       return _msecs_to_jiffies(m);
 }
-EXPORT_SYMBOL(msecs_to_jiffies);
+EXPORT_SYMBOL(__msecs_to_jiffies);
 
 unsigned long usecs_to_jiffies(const unsigned int u)
 {
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index 511bdf2cafdaa2794834ec1695ace5a587cf2022..c7388dee86358ae46967a20f39ffab6f44080d16 100644 (file)
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -50,7 +50,7 @@ define timeconst(hz) {
        print "#include <linux/types.h>\n\n"
 
        print "#if HZ != ", hz, "\n"
-       print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+       print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
        print "#endif\n\n"
 
        if (hz < 2) {
@@ -105,4 +105,5 @@ define timeconst(hz) {
        halt
 }
 
+hz = read();
 timeconst(hz)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 946acb72179facb1c173e54592b3c1c3637f8abd..3365e32dc2086f4afc82d3147be7826e634f0bf8 100644 (file)
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -602,6 +602,9 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 
        update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
        update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
+
+       if (action & TK_CLOCK_WAS_SET)
+               tk->clock_was_set_seq++;
 }
 
 /**
@@ -1925,48 +1928,21 @@ void do_timer(unsigned long ticks)
        calc_global_load(ticks);
 }
 
-/**
- * ktime_get_update_offsets_tick - hrtimer helper
- * @offs_real: pointer to storage for monotonic -> realtime offset
- * @offs_boot: pointer to storage for monotonic -> boottime offset
- * @offs_tai:  pointer to storage for monotonic -> clock tai offset
- *
- * Returns monotonic time at last tick and various offsets
- */
-ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
-                                                       ktime_t *offs_tai)
-{
-       struct timekeeper *tk = &tk_core.timekeeper;
-       unsigned int seq;
-       ktime_t base;
-       u64 nsecs;
-
-       do {
-               seq = read_seqcount_begin(&tk_core.seq);
-
-               base = tk->tkr_mono.base;
-               nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
-
-               *offs_real = tk->offs_real;
-               *offs_boot = tk->offs_boot;
-               *offs_tai = tk->offs_tai;
-       } while (read_seqcount_retry(&tk_core.seq, seq));
-
-       return ktime_add_ns(base, nsecs);
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
 /**
  * ktime_get_update_offsets_now - hrtimer helper
+ * @cwsseq:    pointer to check and store the clock was set sequence number
  * @offs_real: pointer to storage for monotonic -> realtime offset
  * @offs_boot: pointer to storage for monotonic -> boottime offset
  * @offs_tai:  pointer to storage for monotonic -> clock tai offset
  *
- * Returns current monotonic time and updates the offsets
+ * Returns current monotonic time and updates the offsets if the
+ * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
+ * different.
+ *
  * Called from hrtimer_interrupt() or retrigger_next_event()
  */
-ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
-                                                       ktime_t *offs_tai)
+ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
+                                    ktime_t *offs_boot, ktime_t *offs_tai)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
@@ -1978,15 +1954,16 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
 
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);
-
-               *offs_real = tk->offs_real;
-               *offs_boot = tk->offs_boot;
-               *offs_tai = tk->offs_tai;
+               if (*cwsseq != tk->clock_was_set_seq) {
+                       *cwsseq = tk->clock_was_set_seq;
+                       *offs_real = tk->offs_real;
+                       *offs_boot = tk->offs_boot;
+                       *offs_tai = tk->offs_tai;
+               }
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
        return ktime_add_ns(base, nsecs);
 }
-#endif
 
 /**
  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
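
The new @cwsseq argument turns the offset copy into a conditional update: the caller caches the last clock-was-set sequence number and the offsets are only rewritten when it has changed. A hedged sketch of such a caller (the structure and function names below are illustrative, not taken from this series):

struct cached_offsets {
	unsigned int	clock_was_set_seq;	/* last sequence number seen */
	ktime_t		offs_real;
	ktime_t		offs_boot;
	ktime_t		offs_tai;
};

static ktime_t update_cached_offsets(struct cached_offsets *c)
{
	/* Offsets are copied only when the sequence number has moved on */
	return ktime_get_update_offsets_now(&c->clock_was_set_seq,
					    &c->offs_real, &c->offs_boot,
					    &c->offs_tai);
}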
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h
index ead8794b9a4e470242d37684fd04079ffbd70dec..704f595ce83f03090f3f6d63a4d792703b4084a4 100644 (file)
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
@@ -3,19 +3,16 @@
 /*
  * Internal interfaces for kernel/time/
  */
-extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
-                                               ktime_t *offs_boot,
-                                               ktime_t *offs_tai);
-extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
-                                               ktime_t *offs_boot,
-                                               ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
+                                           ktime_t *offs_real,
+                                           ktime_t *offs_boot,
+                                           ktime_t *offs_tai);
 
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
 extern int timekeeping_inject_offset(struct timespec *ts);
 extern s32 timekeeping_get_tai_offset(void);
 extern void timekeeping_set_tai_offset(s32 tai_offset);
-extern void timekeeping_clocktai(struct timespec *ts);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
 
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2ece3aa5069cade64b8c4982e920a45bea5ba232..d4af7c56c95dbf093c4f24286dcc1f889e444d2c 100644 (file)
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -49,6 +49,8 @@
 #include <asm/timex.h>
 #include <asm/io.h>
 
+#include "tick-internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
 
@@ -434,7 +436,7 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
         * require special care against races with idle_cpu(), lets deal
         * with that later.
         */
-       if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
+       if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(base->cpu))
                wake_up_nohz_cpu(base->cpu);
 }
 
@@ -1311,54 +1313,48 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
  * Check, if the next hrtimer event is before the next timer wheel
  * event:
  */
-static unsigned long cmp_next_hrtimer_event(unsigned long now,
-                                           unsigned long expires)
+static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 {
-       ktime_t hr_delta = hrtimer_get_next_event();
-       struct timespec tsdelta;
-       unsigned long delta;
-
-       if (hr_delta.tv64 == KTIME_MAX)
-               return expires;
+       u64 nextevt = hrtimer_get_next_event();
 
        /*
-        * Expired timer available, let it expire in the next tick
+        * If high resolution timers are enabled
+        * hrtimer_get_next_event() returns KTIME_MAX.
         */
-       if (hr_delta.tv64 <= 0)
-               return now + 1;
-
-       tsdelta = ktime_to_timespec(hr_delta);
-       delta = timespec_to_jiffies(&tsdelta);
+       if (expires <= nextevt)
+               return expires;
 
        /*
-        * Limit the delta to the max value, which is checked in
-        * tick_nohz_stop_sched_tick():
+        * If the next timer is already expired, return the tick base
+        * time so the tick is fired immediately.
         */
-       if (delta > NEXT_TIMER_MAX_DELTA)
-               delta = NEXT_TIMER_MAX_DELTA;
+       if (nextevt <= basem)
+               return basem;
 
        /*
-        * Take rounding errors in to account and make sure, that it
-        * expires in the next tick. Otherwise we go into an endless
-        * ping pong due to tick_nohz_stop_sched_tick() retriggering
-        * the timer softirq
+        * Round up to the next jiffie. High resolution timers are
+        * off, so the hrtimers are expired in the tick and we need to
+        * make sure that this tick really expires the timer to avoid
+        * a ping pong of the nohz stop code.
+        *
+        * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
         */
-       if (delta < 1)
-               delta = 1;
-       now += delta;
-       if (time_before(now, expires))
-               return now;
-       return expires;
+       return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
 /**
- * get_next_timer_interrupt - return the jiffy of the next pending timer
- * @now: current time (in jiffies)
+ * get_next_timer_interrupt - return the time (clock mono) of the next timer
+ * @basej:     base time jiffies
+ * @basem:     base time clock monotonic
+ *
+ * Returns the tick aligned clock monotonic time of the next pending
+ * timer or KTIME_MAX if no timer is pending.
  */
-unsigned long get_next_timer_interrupt(unsigned long now)
+u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
        struct tvec_base *base = __this_cpu_read(tvec_bases);
-       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
+       u64 expires = KTIME_MAX;
+       unsigned long nextevt;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1371,14 +1367,15 @@ unsigned long get_next_timer_interrupt(unsigned long now)
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
-               expires = base->next_timer;
+               nextevt = base->next_timer;
+               if (time_before_eq(nextevt, basej))
+                       expires = basem;
+               else
+                       expires = basem + (nextevt - basej) * TICK_NSEC;
        }
        spin_unlock(&base->lock);
 
-       if (time_before_eq(expires, now))
-               return now;
-
-       return cmp_next_hrtimer_event(now, expires);
+       return cmp_next_hrtimer_event(basem, expires);
 }
 #endif
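
To make the jiffies-to-clock-monotonic conversion above concrete, a worked example with assumed numbers (HZ=1000, so TICK_NSEC == 1000000):

	basej = 1000, basem = 2000000000 ns, base->next_timer = 1005
	expires = basem + (1005 - 1000) * TICK_NSEC = 2005000000 ns

	If an hrtimer is pending earlier, say at nextevt = 2003500000 ns,
	cmp_next_hrtimer_event() rounds it up to the next tick boundary:
	DIV_ROUND_UP_ULL(2003500000, 1000000) * 1000000 = 2004000000 ns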
 
@@ -1409,8 +1406,6 @@ static void run_timer_softirq(struct softirq_action *h)
 {
        struct tvec_base *base = __this_cpu_read(tvec_bases);
 
-       hrtimer_run_pending();
-
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
 }
@@ -1697,14 +1692,14 @@ unsigned long msleep_interruptible(unsigned int msecs)
 
 EXPORT_SYMBOL(msleep_interruptible);
 
-static int __sched do_usleep_range(unsigned long min, unsigned long max)
+static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
        ktime_t kmin;
        unsigned long delta;
 
        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (max - min) * NSEC_PER_USEC;
-       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+       schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 
 /**
@@ -1712,7 +1707,7 @@ static int __sched do_usleep_range(unsigned long min, unsigned long max)
  * @min: Minimum time in usecs to sleep
  * @max: Maximum time in usecs to sleep
  */
-void usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range(unsigned long min, unsigned long max)
 {
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index e878c2e0ba45e06c4690646a8853406e11dd1a15..18b074b215b0a171b9411cd1c8154c3a47b59ce8 100644 (file)
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -35,13 +35,20 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
  * This allows printing both to /proc/timer_list and
  * to the console (on SysRq-Q):
  */
-#define SEQ_printf(m, x...)                    \
- do {                                          \
-       if (m)                                  \
-               seq_printf(m, x);               \
-       else                                    \
-               printk(x);                      \
- } while (0)
+__printf(2, 3)
+static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+
+       if (m)
+               seq_vprintf(m, fmt, args);
+       else
+               vprintk(fmt, args);
+
+       va_end(args);
+}
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
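
Converting the macro to a real function keeps printf-style checking through the __printf(2, 3) attribute, so the compiler can flag broken call sites directly. Hypothetical examples (not from this patch) of what -Wformat would catch:

	SEQ_printf(m, "cpu: %d\n");			/* missing argument */
	SEQ_printf(m, "cpu: %d\n", "not-an-int");	/* type mismatch    */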
@@ -120,10 +127,10 @@ static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
        SEQ_printf(m, "  .base:       %pK\n", base);
-       SEQ_printf(m, "  .index:      %d\n",
-                       base->index);
-       SEQ_printf(m, "  .resolution: %Lu nsecs\n",
-                       (unsigned long long)ktime_to_ns(base->resolution));
+       SEQ_printf(m, "  .index:      %d\n", base->index);
+
+       SEQ_printf(m, "  .resolution: %u nsecs\n", (unsigned) hrtimer_resolution);
+
        SEQ_printf(m,   "  .get_time:   ");
        print_name_offset(m, base->get_time);
        SEQ_printf(m,   "\n");
@@ -158,7 +165,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
        P(nr_events);
        P(nr_retries);
        P(nr_hangs);
-       P_ns(max_hang_time);
+       P(max_hang_time);
 #endif
 #undef P
 #undef P_ns
@@ -184,7 +191,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
                P_ns(idle_sleeptime);
                P_ns(iowait_sleeptime);
                P(last_jiffies);
-               P(next_jiffies);
+               P(next_timer);
                P_ns(idle_expires);
                SEQ_printf(m, "jiffies: %Lu\n",
                           (unsigned long long)jiffies);
@@ -269,11 +276,11 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 {
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        print_tickdevice(m, tick_get_broadcast_device(), -1);
-       SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-                  cpumask_bits(tick_get_broadcast_mask())[0]);
+       SEQ_printf(m, "tick_broadcast_mask: %*pb\n",
+                  cpumask_pr_args(tick_get_broadcast_mask()));
 #ifdef CONFIG_TICK_ONESHOT
-       SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-                  cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
+       SEQ_printf(m, "tick_broadcast_oneshot_mask: %*pb\n",
+                  cpumask_pr_args(tick_get_broadcast_oneshot_mask()));
 #endif
        SEQ_printf(m, "\n");
 #endif
@@ -282,7 +289,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 
 static inline void timer_list_header(struct seq_file *m, u64 now)
 {
-       SEQ_printf(m, "Timer List Version: v0.7\n");
+       SEQ_printf(m, "Timer List Version: v0.8\n");
        SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
        SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
        SEQ_printf(m, "\n");
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index a382e4a326091691a617a7c3387a09c813c50792..782ae8ca2c06f2b3439b10592cef6a43c31223cd 100644 (file)
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -36,7 +36,7 @@
  * Adds the timer node to the timerqueue, sorted by the
  * node's expires value.
  */
-void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
+bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
        struct rb_node **p = &head->head.rb_node;
        struct rb_node *parent = NULL;
@@ -56,8 +56,11 @@ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
        rb_link_node(&node->node, parent, p);
        rb_insert_color(&node->node, &head->head);
 
-       if (!head->next || node->expires.tv64 < head->next->expires.tv64)
+       if (!head->next || node->expires.tv64 < head->next->expires.tv64) {
                head->next = node;
+               return true;
+       }
+       return false;
 }
 EXPORT_SYMBOL_GPL(timerqueue_add);
 
@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  *
  * Removes the timer node from the timerqueue.
  */
-void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
+bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {
        WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
 
@@ -82,6 +85,7 @@ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
        }
        rb_erase(&node->node, &head->head);
        RB_CLEAR_NODE(&node->node);
+       return head->next != NULL;
 }
 EXPORT_SYMBOL_GPL(timerqueue_del);
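
The bool return values added above let callers avoid needless reprogramming: timerqueue_add() reports whether the new node became the head (the earliest expiry), timerqueue_del() whether anything is left in the queue. A hedged sketch of a caller; the reprogram/stop helpers are made up for illustration:

static void enqueue_timer(struct timerqueue_head *head,
			  struct timerqueue_node *node)
{
	/* true: the new node is now the earliest-expiring entry */
	if (timerqueue_add(head, node))
		reprogram_event_device(node->expires);	/* hypothetical */
}

static void dequeue_timer(struct timerqueue_head *head,
			  struct timerqueue_node *node)
{
	/* false: the queue is now empty, the event device can be idled */
	if (!timerqueue_del(head, node))
		stop_event_device();			/* hypothetical */
}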
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 508155b283ddcc73a967a2bc8068e67cb8cada7d..54817d365366f8f7b408f0b790d9c92628147026 100644 (file)
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2212,8 +2212,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
                do {
                        set_current_state(TASK_INTERRUPTIBLE);
                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
-                       if (!hrtimer_active(&t.timer))
-                               t.task = NULL;
 
                        if (likely(t.task))
                                schedule();
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ad9eed70bc8f8e16c3118c6527374a952823e2c0..45bc63ae18e3ae9a5fe2a80e9de4763cac39cebd 100644 (file)
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1883,13 +1883,10 @@ EXPORT_SYMBOL(tcf_destroy_chain);
 #ifdef CONFIG_PROC_FS
 static int psched_show(struct seq_file *seq, void *v)
 {
-       struct timespec ts;
-
-       hrtimer_get_res(CLOCK_MONOTONIC, &ts);
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   1000000,
-                  (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
+                  (u32)NSEC_PER_SEC / hrtimer_resolution);
 
        return 0;
 }
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 886be7da989d1ab52d647105ef3648a8045facbe..f845ecf7e172935f938bc52ecfe9c95c7bac4e11 100644 (file)
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -121,16 +121,9 @@ static struct snd_timer *mytimer;
 static int __init snd_hrtimer_init(void)
 {
        struct snd_timer *timer;
-       struct timespec tp;
        int err;
 
-       hrtimer_get_res(CLOCK_MONOTONIC, &tp);
-       if (tp.tv_sec > 0 || !tp.tv_nsec) {
-               pr_err("snd-hrtimer: Invalid resolution %u.%09u",
-                          (unsigned)tp.tv_sec, (unsigned)tp.tv_nsec);
-               return -EINVAL;
-       }
-       resolution = tp.tv_nsec;
+       resolution = hrtimer_resolution;
 
        /* Create a new timer and set up the fields */
        err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER,
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index d9647bd84d0f49e9b532fa1cbeb685645dfe33b9..27e25bb78c9782a5ba9fff5933d495eb4f200878 100644 (file)
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -42,16 +42,13 @@ struct snd_pcsp pcsp_chip;
 static int snd_pcsp_create(struct snd_card *card)
 {
        static struct snd_device_ops ops = { };
-       struct timespec tp;
-       int err;
-       int div, min_div, order;
-
-       hrtimer_get_res(CLOCK_MONOTONIC, &tp);
+       unsigned int resolution = hrtimer_resolution;
+       int err, div, min_div, order;
 
        if (!nopcm) {
-               if (tp.tv_sec || tp.tv_nsec > PCSP_MAX_PERIOD_NS) {
+               if (resolution > PCSP_MAX_PERIOD_NS) {
                        printk(KERN_ERR "PCSP: Timer resolution is not sufficient "
-                               "(%linS)\n", tp.tv_nsec);
+                               "(%unS)\n", resolution);
                        printk(KERN_ERR "PCSP: Make sure you have HPET and ACPI "
                                "enabled.\n");
                        printk(KERN_ERR "PCSP: Turned into nopcm mode.\n");
@@ -59,13 +56,13 @@ static int snd_pcsp_create(struct snd_card *card)
                }
        }
 
-       if (loops_per_jiffy >= PCSP_MIN_LPJ && tp.tv_nsec <= PCSP_MIN_PERIOD_NS)
+       if (loops_per_jiffy >= PCSP_MIN_LPJ && resolution <= PCSP_MIN_PERIOD_NS)
                min_div = MIN_DIV;
        else
                min_div = MAX_DIV;
 #if PCSP_DEBUG
-       printk(KERN_DEBUG "PCSP: lpj=%li, min_div=%i, res=%li\n",
-              loops_per_jiffy, min_div, tp.tv_nsec);
+       printk(KERN_DEBUG "PCSP: lpj=%li, min_div=%i, res=%u\n",
+              loops_per_jiffy, min_div, resolution);
 #endif
 
        div = MAX_DIV / min_div;