asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branches 'pm-cpufreq', 'pm-pci' and 'pm-sleep'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 21 Aug 2018 20:39:24 +0000 (22:39 +0200)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tue, 21 Aug 2018 20:39:24 +0000 (22:39 +0200)
Merge fixes for the ondemand and conservative cpufreq governors,
PCI power management and system wakeup framework.

* pm-cpufreq:
  cpufreq: governor: Avoid accessing invalid governor_data

* pm-pci:
  PCI / ACPI / PM: Resume all bridges on suspend-to-RAM

* pm-sleep:
  PM / sleep: wakeup: Fix build error caused by missing SRCU support

arch/x86/kernel/acpi/cstate.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpuidle/governors/menu.c
drivers/pci/pci-acpi.c
kernel/sched/idle.c

index dde437f5d14ff828dccff19275033ff2330acc60..158ad1483c4352b2f93c7cbb931dfdb8dd40c3bf 100644 (file)
@@ -108,7 +108,7 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
                        cx->type);
        }
        snprintf(cx->desc,
-                       ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
+                       ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
                        cx->address);
 out:
        return retval;
index 1d50e97d49f192cd8bf1c5eb0ce569e9641cf43e..6d53f7d9fc7a92d415b507f9b8facdb3f4143b17 100644 (file)
@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
 
 void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
 {
-       struct policy_dbs_info *policy_dbs = policy->governor_data;
+       struct policy_dbs_info *policy_dbs;
+
+       /* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
+       mutex_lock(&gov_dbs_data_mutex);
+       policy_dbs = policy->governor_data;
+       if (!policy_dbs)
+               goto out;
 
        mutex_lock(&policy_dbs->update_mutex);
        cpufreq_policy_apply_limits(policy);
        gov_update_sample_delay(policy_dbs, 0);
-
        mutex_unlock(&policy_dbs->update_mutex);
+
+out:
+       mutex_unlock(&gov_dbs_data_mutex);
 }
 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
index 1aef60d160eb0c6236bee855a9a2cdd244679418..110483f0e3fbad97c1304b633b5d55c940e1fe70 100644 (file)
@@ -328,9 +328,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                unsigned int polling_threshold;
 
                /*
-                * We want to default to C1 (hlt), not to busy polling
-                * unless the timer is happening really really soon, or
-                * C1's exit latency exceeds the user configured limit.
+                * Default to a physical idle state, not to busy polling, unless
+                * a timer is going to trigger really really soon.
                 */
                polling_threshold = max_t(unsigned int, 20, s->target_residency);
                if (data->next_timer_us > polling_threshold &&
@@ -349,14 +348,12 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 * If the tick is already stopped, the cost of possible short
                 * idle duration misprediction is much higher, because the CPU
                 * may be stuck in a shallow idle state for a long time as a
-                * result of it.  In that case say we might mispredict and try
-                * to force the CPU into a state for which we would have stopped
-                * the tick, unless a timer is going to expire really soon
-                * anyway.
+                * result of it.  In that case say we might mispredict and use
+                * the known time till the closest timer event for the idle
+                * state selection.
                 */
                if (data->predicted_us < TICK_USEC)
-                       data->predicted_us = min_t(unsigned int, TICK_USEC,
-                                                  ktime_to_us(delta_next));
+                       data->predicted_us = ktime_to_us(delta_next);
        } else {
                /*
                 * Use the performance multiplier and the user-configurable
@@ -381,8 +378,22 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        continue;
                if (idx == -1)
                        idx = i; /* first enabled state */
-               if (s->target_residency > data->predicted_us)
-                       break;
+               if (s->target_residency > data->predicted_us) {
+                       if (!tick_nohz_tick_stopped())
+                               break;
+
+                       /*
+                        * If the state selected so far is shallow and this
+                        * state's target residency matches the time till the
+                        * closest timer event, select this one to avoid getting
+                        * stuck in the shallow one for too long.
+                        */
+                       if (drv->states[idx].target_residency < TICK_USEC &&
+                           s->target_residency <= ktime_to_us(delta_next))
+                               idx = i;
+
+                       goto out;
+               }
                if (s->exit_latency > latency_req) {
                        /*
                         * If we break out of the loop for latency reasons, use
@@ -403,14 +414,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
         * Don't stop the tick if the selected state is a polling one or if the
         * expected idle duration is shorter than the tick period length.
         */
-       if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-           expected_interval < TICK_USEC) {
+       if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
+            expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
                unsigned int delta_next_us = ktime_to_us(delta_next);
 
                *stop_tick = false;
 
-               if (!tick_nohz_tick_stopped() && idx > 0 &&
-                   drv->states[idx].target_residency > delta_next_us) {
+               if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
                        /*
                         * The tick is not going to be stopped and the target
                         * residency of the state to be returned is not within
@@ -418,8 +428,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         * tick, so try to correct that.
                         */
                        for (i = idx - 1; i >= 0; i--) {
-                           if (drv->states[i].disabled ||
-                               dev->states_usage[i].disable)
+                               if (drv->states[i].disabled ||
+                                   dev->states_usage[i].disable)
                                        continue;
 
                                idx = i;
@@ -429,6 +439,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                }
        }
 
+out:
        data->last_state_idx = idx;
 
        return data->last_state_idx;
index 89ee6a2b6eb838f426d6d9d3773f70769d1a489f..5d1698265da5211e998729bb3499f9e81131dfb3 100644 (file)
@@ -632,13 +632,11 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
        /*
         * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
         * system-wide suspend/resume confuses the platform firmware, so avoid
-        * doing that, unless the bridge has a driver that should take care of
-        * the PM handling.  According to Section 16.1.6 of ACPI 6.2, endpoint
+        * doing that.  According to Section 16.1.6 of ACPI 6.2, endpoint
         * devices are expected to be in D3 before invoking the S3 entry path
         * from the firmware, so they should not be affected by this issue.
         */
-       if (pci_is_bridge(dev) && !dev->driver &&
-           acpi_target_system_state() != ACPI_STATE_S0)
+       if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
                return true;
 
        if (!adev || !acpi_device_power_manageable(adev))
index 1a3e9bddd17b67955f5f0446310a090872d7eacf..16f84142f2f492633b6068f8f7536cdae40c6e0d 100644 (file)
@@ -190,7 +190,7 @@ static void cpuidle_idle_call(void)
                 */
                next_state = cpuidle_select(drv, dev, &stop_tick);
 
-               if (stop_tick)
+               if (stop_tick || tick_nohz_tick_stopped())
                        tick_nohz_idle_stop_tick();
                else
                        tick_nohz_idle_retain_tick();