Merge branch 'linus' into timers/core
author Thomas Gleixner <tglx@linutronix.de>
Tue, 19 May 2015 14:12:32 +0000 (16:12 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Tue, 19 May 2015 14:12:32 +0000 (16:12 +0200)
Make sure the upstream fixes are applied before adding further
modifications.

arch/x86/kernel/cpu/perf_event_intel_rapl.c
include/linux/clocksource.h
kernel/events/core.c
kernel/locking/rtmutex.c
kernel/sched/core.c

diff --combined arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 10190c044a7e2454b932257eb1400d3da19c58a0,358c54ad20d4084db807a05ae49def561aa4dd32..5cbd4e64feb582d927c6751914757a6784d68a60
@@@ -204,8 -204,9 +204,8 @@@ again
  
  static void rapl_start_hrtimer(struct rapl_pmu *pmu)
  {
 -      __hrtimer_start_range_ns(&pmu->hrtimer,
 -                      pmu->timer_interval, 0,
 -                      HRTIMER_MODE_REL_PINNED, 0);
 +       hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
 +                   HRTIMER_MODE_REL_PINNED);
  }
  
  static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
@@@ -721,6 -722,7 +721,7 @@@ static int __init rapl_pmu_init(void
                break;
        case 60: /* Haswell */
        case 69: /* Haswell-Celeron */
+       case 61: /* Broadwell */
                rapl_cntr_mask = RAPL_IDX_HSW;
                rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
                break;
diff --combined include/linux/clocksource.h
index a25fc6e873b8c40e8ce734c423eaa9e7003acf7d,d27d0152271f9e8b487a48a9f2d74f51fe9a58a5..278dd279a7a8035e8be073a9664ea88f7357984a
@@@ -181,6 -181,7 +181,6 @@@ static inline s64 clocksource_cyc2ns(cy
  
  extern int clocksource_unregister(struct clocksource*);
  extern void clocksource_touch_watchdog(void);
 -extern struct clocksource* clocksource_get_next(void);
  extern void clocksource_change_rating(struct clocksource *cs, int rating);
  extern void clocksource_suspend(void);
  extern void clocksource_resume(void);
@@@ -252,4 -253,10 +252,10 @@@ extern void clocksource_of_init(void)
  static inline void clocksource_of_init(void) {}
  #endif
  
+ #ifdef CONFIG_ACPI
+ void acpi_generic_timer_init(void);
+ #else
+ static inline void acpi_generic_timer_init(void) { }
+ #endif
  #endif /* _LINUX_CLOCKSOURCE_H */
diff --combined kernel/events/core.c
index d9c93f36e379e3facc9dccb0339bcbd510c6db51,1a3bf48743ce1c62c26077d642084cbdc8b40d6b..1c6c2826af1ee0bfa6e8d1c9ba1284fb065550c3
  
  static struct workqueue_struct *perf_wq;
  
 +typedef int (*remote_function_f)(void *);
 +
  struct remote_function_call {
        struct task_struct      *p;
 -      int                     (*func)(void *info);
 +      remote_function_f       func;
        void                    *info;
        int                     ret;
  };
@@@ -88,7 -86,7 +88,7 @@@ static void remote_function(void *data
   *        -EAGAIN - when the process moved away
   */
  static int
 -task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
 +task_function_call(struct task_struct *p, remote_function_f func, void *info)
  {
        struct remote_function_call data = {
                .p      = p,
   *
   * returns: @func return value or -ENXIO when the cpu is offline
   */
 -static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
 +static int cpu_function_call(int cpu, remote_function_f func, void *info)
  {
        struct remote_function_call data = {
                .p      = NULL,
@@@ -749,31 -747,62 +749,31 @@@ perf_cgroup_mark_enabled(struct perf_ev
  /*
   * function must be called with interrupts disabled
   */
 -static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
 +static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
  {
        struct perf_cpu_context *cpuctx;
 -      enum hrtimer_restart ret = HRTIMER_NORESTART;
        int rotations = 0;
  
        WARN_ON(!irqs_disabled());
  
        cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
 -
        rotations = perf_rotate_context(cpuctx);
  
 -      /*
 -       * arm timer if needed
 -       */
 -      if (rotations) {
 +      raw_spin_lock(&cpuctx->hrtimer_lock);
 +      if (rotations)
                hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
 -              ret = HRTIMER_RESTART;
 -      }
 -
 -      return ret;
 -}
 -
 -/* CPU is going down */
 -void perf_cpu_hrtimer_cancel(int cpu)
 -{
 -      struct perf_cpu_context *cpuctx;
 -      struct pmu *pmu;
 -      unsigned long flags;
 -
 -      if (WARN_ON(cpu != smp_processor_id()))
 -              return;
 -
 -      local_irq_save(flags);
 -
 -      rcu_read_lock();
 -
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -
 -              if (pmu->task_ctx_nr == perf_sw_context)
 -                      continue;
 -
 -              hrtimer_cancel(&cpuctx->hrtimer);
 -      }
 -
 -      rcu_read_unlock();
 +      else
 +              cpuctx->hrtimer_active = 0;
 +      raw_spin_unlock(&cpuctx->hrtimer_lock);
  
 -      local_irq_restore(flags);
 +      return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
  }
  
 -static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 +static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
  {
 -      struct hrtimer *hr = &cpuctx->hrtimer;
 +      struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
 -      int timer;
 +      u64 interval;
  
        /* no multiplexing needed for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
         * check default is sane, if not set then force to
         * default interval (1/tick)
         */
 -      timer = pmu->hrtimer_interval_ms;
 -      if (timer < 1)
 -              timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
 +      interval = pmu->hrtimer_interval_ms;
 +      if (interval < 1)
 +              interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
  
 -      cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 +      cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
  
 -      hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
 -      hr->function = perf_cpu_hrtimer_handler;
 +      raw_spin_lock_init(&cpuctx->hrtimer_lock);
 +      hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 +      timer->function = perf_mux_hrtimer_handler;
  }
  
 -static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
 +static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
  {
 -      struct hrtimer *hr = &cpuctx->hrtimer;
 +      struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
 +      unsigned long flags;
  
        /* not for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
 -              return;
 +              return 0;
  
 -      if (hrtimer_active(hr))
 -              return;
 +      raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
 +      if (!cpuctx->hrtimer_active) {
 +              cpuctx->hrtimer_active = 1;
 +              hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
 +              hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 +      }
 +      raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
  
 -      if (!hrtimer_callback_running(hr))
 -              __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
 -                                       0, HRTIMER_MODE_REL_PINNED, 0);
 +      return 0;
  }
  
  void perf_pmu_disable(struct pmu *pmu)
@@@ -889,10 -913,30 +889,30 @@@ static void put_ctx(struct perf_event_c
   * Those places that change perf_event::ctx will hold both
   * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
   *
-  * Lock ordering is by mutex address. There is one other site where
-  * perf_event_context::mutex nests and that is put_event(). But remember that
-  * that is a parent<->child context relation, and migration does not affect
-  * children, therefore these two orderings should not interact.
+  * Lock ordering is by mutex address. There are two other sites where
+  * perf_event_context::mutex nests and those are:
+  *
+  *  - perf_event_exit_task_context()  [ child , 0 ]
+  *      __perf_event_exit_task()
+  *        sync_child_event()
+  *          put_event()                       [ parent, 1 ]
+  *
+  *  - perf_event_init_context()               [ parent, 0 ]
+  *      inherit_task_group()
+  *        inherit_group()
+  *          inherit_event()
+  *            perf_event_alloc()
+  *              perf_init_event()
+  *                perf_try_init_event()       [ child , 1 ]
+  *
+  * While it appears there is an obvious deadlock here (the parent and child
+  * nesting levels are inverted between the two), this is in fact safe because
+  * life-time rules separate them. That is, an exiting task cannot fork, and a
+  * spawning task cannot (yet) exit.
+  *
+  * But remember that these are parent<->child context relations, and
+  * migration does not affect children, therefore these two orderings should not
+  * interact.
   *
   * The change in perf_event::ctx does not affect children (as claimed above)
   * because the sys_perf_event_open() case will install a new event and break
@@@ -1891,7 -1935,7 +1911,7 @@@ group_sched_in(struct perf_event *group
  
        if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
 -              perf_cpu_hrtimer_restart(cpuctx);
 +              perf_mux_hrtimer_restart(cpuctx);
                return -EAGAIN;
        }
  
@@@ -1938,7 -1982,7 +1958,7 @@@ group_error
  
        pmu->cancel_txn(pmu);
  
 -      perf_cpu_hrtimer_restart(cpuctx);
 +      perf_mux_hrtimer_restart(cpuctx);
  
        return -EAGAIN;
  }
@@@ -2211,7 -2255,7 +2231,7 @@@ static int __perf_event_enable(void *in
                 */
                if (leader != event) {
                        group_sched_out(leader, cpuctx, ctx);
 -                      perf_cpu_hrtimer_restart(cpuctx);
 +                      perf_mux_hrtimer_restart(cpuctx);
                }
                if (leader->attr.pinned) {
                        update_group_times(leader);
@@@ -3633,9 -3677,6 +3653,6 @@@ static void perf_remove_from_owner(stru
        }
  }
  
- /*
-  * Called when the last reference to the file is gone.
-  */
  static void put_event(struct perf_event *event)
  {
        struct perf_event_context *ctx;
@@@ -3673,6 -3714,9 +3690,9 @@@ int perf_event_release_kernel(struct pe
  }
  EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  
+ /*
+  * Called when the last reference to the file is gone.
+  */
  static int perf_release(struct inode *inode, struct file *file)
  {
        put_event(file->private_data);
@@@ -6819,8 -6863,9 +6839,8 @@@ static void perf_swevent_start_hrtimer(
        } else {
                period = max_t(u64, 10000, hwc->sample_period);
        }
 -      __hrtimer_start_range_ns(&hwc->hrtimer,
 -                              ns_to_ktime(period), 0,
 -                              HRTIMER_MODE_REL_PINNED, 0);
 +      hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
 +                    HRTIMER_MODE_REL_PINNED);
  }
  
  static void perf_swevent_cancel_hrtimer(struct perf_event *event)
@@@ -7121,8 -7166,6 +7141,8 @@@ perf_event_mux_interval_ms_show(struct 
        return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
  }
  
 +static DEFINE_MUTEX(mux_interval_mutex);
 +
  static ssize_t
  perf_event_mux_interval_ms_store(struct device *dev,
                                 struct device_attribute *attr,
        if (timer == pmu->hrtimer_interval_ms)
                return count;
  
 +      mutex_lock(&mux_interval_mutex);
        pmu->hrtimer_interval_ms = timer;
  
        /* update all cpuctx for this PMU */
 -      for_each_possible_cpu(cpu) {
 +      get_online_cpus();
 +      for_each_online_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
  
 -              if (hrtimer_active(&cpuctx->hrtimer))
 -                      hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
 +              cpu_function_call(cpu,
 +                      (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
        }
 +      put_online_cpus();
 +      mutex_unlock(&mux_interval_mutex);
  
        return count;
  }
@@@ -7261,7 -7300,7 +7281,7 @@@ skip_type
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
                cpuctx->ctx.pmu = pmu;
  
 -              __perf_cpu_hrtimer_init(cpuctx, cpu);
 +              __perf_mux_hrtimer_init(cpuctx, cpu);
  
                cpuctx->unique_pmu = pmu;
        }
@@@ -7345,7 -7384,12 +7365,12 @@@ static int perf_try_init_event(struct p
                return -ENODEV;
  
        if (event->group_leader != event) {
-               ctx = perf_event_ctx_lock(event->group_leader);
+               /*
+                * This ctx->mutex can nest when we're called through
+                * inheritance. See the perf_event_ctx_lock_nested() comment.
+                */
+               ctx = perf_event_ctx_lock_nested(event->group_leader,
+                                                SINGLE_DEPTH_NESTING);
                BUG_ON(!ctx);
        }
  
diff --combined kernel/locking/rtmutex.c
index 8626437acf0cd638e297470baf04bc2faf8bfc8f,b025295f49662469d1f3b4257f3835d2f40f01e1..8b678cac7fbe389553272a417a3d82c2ddb39406
@@@ -265,15 -265,17 +265,17 @@@ struct task_struct *rt_mutex_get_top_ta
  }
  
  /*
-  * Called by sched_setscheduler() to check whether the priority change
-  * is overruled by a possible priority boosting.
+  * Called by sched_setscheduler() to get the priority which will be
+  * effective after the change.
   */
- int rt_mutex_check_prio(struct task_struct *task, int newprio)
+ int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
  {
        if (!task_has_pi_waiters(task))
-               return 0;
+               return newprio;
  
-       return task_top_pi_waiter(task)->task->prio <= newprio;
+       if (task_top_pi_waiter(task)->task->prio <= newprio)
+               return task_top_pi_waiter(task)->task->prio;
+       return newprio;
  }
  
  /*
@@@ -1180,8 -1182,11 +1182,8 @@@ rt_mutex_slowlock(struct rt_mutex *lock
        set_current_state(state);
  
        /* Setup the timer, when timeout != NULL */
 -      if (unlikely(timeout)) {
 +      if (unlikely(timeout))
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
 -              if (!hrtimer_active(&timeout->timer))
 -                      timeout->task = NULL;
 -      }
  
        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
  
diff --combined kernel/sched/core.c
index e84aeb280777e422c3db39af4cc8b0ae28f4639a,57bd333bc4ab3e070356e7a3b9b9b2a5e742c91f..ecb7c4216350cf00f3be979246fc44b8a14a0c23
  #define CREATE_TRACE_POINTS
  #include <trace/events/sched.h>
  
 -void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 -{
 -      unsigned long delta;
 -      ktime_t soft, hard, now;
 -
 -      for (;;) {
 -              if (hrtimer_active(period_timer))
 -                      break;
 -
 -              now = hrtimer_cb_get_time(period_timer);
 -              hrtimer_forward(period_timer, now, period);
 -
 -              soft = hrtimer_get_softexpires(period_timer);
 -              hard = hrtimer_get_expires(period_timer);
 -              delta = ktime_to_ns(ktime_sub(hard, soft));
 -              __hrtimer_start_range_ns(period_timer, soft, delta,
 -                                       HRTIMER_MODE_ABS_PINNED, 0);
 -      }
 -}
 -
  DEFINE_MUTEX(sched_domains_mutex);
  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  
@@@ -335,11 -355,12 +335,11 @@@ static enum hrtimer_restart hrtick(stru
  
  #ifdef CONFIG_SMP
  
 -static int __hrtick_restart(struct rq *rq)
 +static void __hrtick_restart(struct rq *rq)
  {
        struct hrtimer *timer = &rq->hrtick_timer;
 -      ktime_t time = hrtimer_get_softexpires(timer);
  
 -      return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
 +      hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
  }
  
  /*
@@@ -419,8 -440,8 +419,8 @@@ void hrtick_start(struct rq *rq, u64 de
         * doesn't make sense. Rely on vruntime for fairness.
         */
        delay = max_t(u64, delay, 10000LL);
 -      __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
 -                      HRTIMER_MODE_REL_PINNED, 0);
 +      hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
 +                    HRTIMER_MODE_REL_PINNED);
  }
  
  static inline void init_hrtick(void)
@@@ -995,13 -1016,6 +995,6 @@@ void check_preempt_curr(struct rq *rq, 
                rq_clock_skip_update(rq, true);
  }
  
- static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
- void register_task_migration_notifier(struct notifier_block *n)
- {
-       atomic_notifier_chain_register(&task_migration_notifier, n);
- }
  #ifdef CONFIG_SMP
  void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  {
        trace_sched_migrate_task(p, new_cpu);
  
        if (task_cpu(p) != new_cpu) {
-               struct task_migration_notifier tmn;
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-               tmn.task = p;
-               tmn.from_cpu = task_cpu(p);
-               tmn.to_cpu = new_cpu;
-               atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
        }
  
        __set_task_cpu(p, new_cpu);
@@@ -3294,15 -3300,18 +3279,18 @@@ static void __setscheduler_params(struc
  
  /* Actually do priority change: must hold pi & rq lock. */
  static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr)
+                          const struct sched_attr *attr, bool keep_boost)
  {
        __setscheduler_params(p, attr);
  
        /*
-        * If we get here, there was no pi waiters boosting the
-        * task. It is safe to use the normal prio.
+        * Keep a potential priority boost if called from
+        * sched_setscheduler().
         */
-       p->prio = normal_prio(p);
+       if (keep_boost)
+               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+       else
+               p->prio = normal_prio(p);
  
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
@@@ -3402,7 -3411,7 +3390,7 @@@ static int __sched_setscheduler(struct 
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
        int retval, oldprio, oldpolicy = -1, queued, running;
-       int policy = attr->sched_policy;
+       int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
@@@ -3584,15 -3593,14 +3572,14 @@@ change
        oldprio = p->prio;
  
        /*
-        * Special case for priority boosted tasks.
-        *
-        * If the new priority is lower or equal (user space view)
-        * than the current (boosted) priority, we just store the new
+        * Take priority boosted tasks into account. If the new
+        * effective priority is unchanged, we just store the new
         * normal parameters and do not touch the scheduler class and
         * the runqueue. This will be done when the task deboosts
         * itself.
         */
-       if (rt_mutex_check_prio(p, newprio)) {
+       new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+       if (new_effective_prio == oldprio) {
                __setscheduler_params(p, attr);
                task_rq_unlock(rq, p, &flags);
                return 0;
                put_prev_task(rq, p);
  
        prev_class = p->sched_class;
-       __setscheduler(rq, p, attr);
+       __setscheduler(rq, p, attr, true);
  
        if (running)
                p->sched_class->set_curr_task(rq);
@@@ -6991,27 -6999,23 +6978,23 @@@ static int cpuset_cpu_inactive(struct n
        unsigned long flags;
        long cpu = (long)hcpu;
        struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
  
-       switch (action & ~CPU_TASKS_FROZEN) {
+       switch (action) {
        case CPU_DOWN_PREPARE:
-               /* explicitly allow suspend */
-               if (!(action & CPU_TASKS_FROZEN)) {
-                       bool overflow;
-                       int cpus;
-                       rcu_read_lock_sched();
-                       dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
  
-                       raw_spin_lock_irqsave(&dl_b->lock, flags);
-                       cpus = dl_bw_cpus(cpu);
-                       overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, 0);
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
  
-                       rcu_read_unlock_sched();
+               rcu_read_unlock_sched();
  
-                       if (overflow)
-                               return notifier_from_errno(-EBUSY);
-               }
+               if (overflow)
+                       return notifier_from_errno(-EBUSY);
                cpuset_update_active_cpus(false);
                break;
        case CPU_DOWN_PREPARE_FROZEN:
@@@ -7340,7 -7344,7 +7323,7 @@@ static void normalize_task(struct rq *r
        queued = task_on_rq_queued(p);
        if (queued)
                dequeue_task(rq, p, 0);
-       __setscheduler(rq, p, &attr);
+       __setscheduler(rq, p, &attr, false);
        if (queued) {
                enqueue_task(rq, p, 0);
                resched_curr(rq);
@@@ -8104,8 -8108,10 +8087,8 @@@ static int tg_set_cfs_bandwidth(struct 
  
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
 -      if (runtime_enabled && cfs_b->timer_active) {
 -              /* force a reprogram */
 -              __start_cfs_bandwidth(cfs_b, true);
 -      }
 +      if (runtime_enabled)
 +              start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
  
        for_each_online_cpu(i) {