2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/cpu_cooling.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/slab.h>
30 #include <linux/suspend.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/tick.h>
33 #include <trace/events/power.h>
35 static LIST_HEAD(cpufreq_policy_list);
37 /* Macros to iterate over CPU policies */
38 #define for_each_suitable_policy(__policy, __active) \
39 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
40 if ((__active) == !policy_is_inactive(__policy))
42 #define for_each_active_policy(__policy) \
43 for_each_suitable_policy(__policy, true)
44 #define for_each_inactive_policy(__policy) \
45 for_each_suitable_policy(__policy, false)
47 #define for_each_policy(__policy) \
48 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
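/*
 * Example (illustrative sketch, not part of the core): how code built on the
 * iterators above typically walks the active policies. The helper name is
 * hypothetical, and callers are expected to hold the appropriate locking
 * (cpufreq_driver_lock or policy->rwsem) for their use case.
 */
static inline unsigned int example_count_active_policy_cpus(void)
{
	struct cpufreq_policy *policy;
	unsigned int count = 0;

	for_each_active_policy(policy)
		count += cpumask_weight(policy->cpus);

	return count;
}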
50 /* Iterate over governors */
51 static LIST_HEAD(cpufreq_governor_list);
52 #define for_each_governor(__governor) \
53 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
56 * The "cpufreq driver" - the arch- or hardware-dependent low
57 * level driver of CPUFreq support, and its read-write lock. This lock
58 * also protects the per-CPU cpufreq_cpu_data pointers.
60 static struct cpufreq_driver *cpufreq_driver;
61 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
62 static DEFINE_RWLOCK(cpufreq_driver_lock);
64 /* Flag to suspend/resume CPUFreq governors */
65 static bool cpufreq_suspended;
67 static inline bool has_target(void)
69 return cpufreq_driver->target_index || cpufreq_driver->target;
72 /* internal prototypes */
73 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
74 static int cpufreq_init_governor(struct cpufreq_policy *policy);
75 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
76 static int cpufreq_start_governor(struct cpufreq_policy *policy);
77 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
78 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
81 * Two notifier lists: the "policy" list is involved in the
82 * validation process for a new CPU frequency policy; the
83 * "transition" list for kernel code that needs to handle
84 * changes to devices when the CPU clock speed changes.
85 * The mutex locks both lists.
87 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
88 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
90 static int off __read_mostly;
91 static int cpufreq_disabled(void)
95 void disable_cpufreq(void)
99 static DEFINE_MUTEX(cpufreq_governor_mutex);
101 bool have_governor_per_policy(void)
103 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
105 EXPORT_SYMBOL_GPL(have_governor_per_policy);
107 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
109 if (have_governor_per_policy())
110 return &policy->kobj;
112 return cpufreq_global_kobject;
114 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
116 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
122 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
124 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
131 idle_time = cur_wall_time - busy_time;
133 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
135 return div_u64(idle_time, NSEC_PER_USEC);
138 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
140 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
142 if (idle_time == -1ULL)
143 return get_cpu_idle_time_jiffy(cpu, wall);
145 idle_time += get_cpu_iowait_time_us(cpu, wall);
149 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
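/*
 * Example (illustrative sketch): how a sampling governor might use
 * get_cpu_idle_time() to estimate load from two snapshots of wall and idle
 * time (both in microseconds). The helper and the caller-maintained previous
 * snapshot are hypothetical.
 */
static unsigned int example_cpu_load_percent(unsigned int cpu,
					     u64 *prev_wall, u64 *prev_idle)
{
	u64 wall, idle, wall_delta, idle_delta;

	idle = get_cpu_idle_time(cpu, &wall, 0 /* io_busy */);

	wall_delta = wall - *prev_wall;
	idle_delta = idle - *prev_idle;
	*prev_wall = wall;
	*prev_idle = idle;

	if (!wall_delta || idle_delta > wall_delta)
		return 0;

	return div64_u64(100 * (wall_delta - idle_delta), wall_delta);
}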
151 __weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
152 unsigned long max_freq)
155 EXPORT_SYMBOL_GPL(arch_set_freq_scale);
158 * This is a generic cpufreq init() routine which can be used by cpufreq
159 * drivers of SMP systems. It will do the following:
160 * - validate & show the freq table passed in
161 * - set the policy's transition latency
162 * - fill policy->cpus with all possible CPUs
164 int cpufreq_generic_init(struct cpufreq_policy *policy,
165 struct cpufreq_frequency_table *table,
166 unsigned int transition_latency)
168 policy->freq_table = table;
169 policy->cpuinfo.transition_latency = transition_latency;
172 * The driver only supports the SMP configuration where all processors
173 * share the clock and voltage.
175 cpumask_setall(policy->cpus);
179 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
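/*
 * Example (illustrative sketch): a platform driver's ->init() callback built
 * on top of cpufreq_generic_init(). The frequency table contents and the
 * 100 us transition latency are made up for illustration.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* 500 MHz */
	{ .frequency = 1000000 },		/* 1 GHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* All CPUs share one clock; latency is a hypothetical 100000 ns. */
	return cpufreq_generic_init(policy, example_freq_table, 100000);
}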
181 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
183 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
185 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
187 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
189 unsigned int cpufreq_generic_get(unsigned int cpu)
191 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
193 if (!policy || IS_ERR(policy->clk)) {
194 pr_err("%s: No %s associated to cpu: %d\n",
195 __func__, policy ? "clk" : "policy", cpu);
199 return clk_get_rate(policy->clk) / 1000;
201 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
204 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
205 * @cpu: CPU to find the policy for.
207 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
208 * the kobject reference counter of that policy. Return a valid policy on
209 * success or NULL on failure.
211 * The policy returned by this function has to be released with the help of
212 * cpufreq_cpu_put() to balance its kobject reference counter properly.
214 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
216 struct cpufreq_policy *policy = NULL;
219 if (WARN_ON(cpu >= nr_cpu_ids))
222 /* get the cpufreq driver */
223 read_lock_irqsave(&cpufreq_driver_lock, flags);
225 if (cpufreq_driver) {
227 policy = cpufreq_cpu_get_raw(cpu);
229 kobject_get(&policy->kobj);
232 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
236 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
239 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
240 * @policy: cpufreq policy returned by cpufreq_cpu_get().
242 void cpufreq_cpu_put(struct cpufreq_policy *policy)
244 kobject_put(&policy->kobj);
246 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
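/*
 * Example (illustrative sketch): the reference-counting pattern expected of
 * cpufreq_cpu_get()/cpufreq_cpu_put() users. The helper name is hypothetical.
 */
static unsigned int example_policy_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max = 0;

	if (policy) {
		max = policy->max;
		cpufreq_cpu_put(policy);	/* balance the kobject reference */
	}

	return max;
}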
249 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
250 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
252 void cpufreq_cpu_release(struct cpufreq_policy *policy)
254 if (WARN_ON(!policy))
257 lockdep_assert_held(&policy->rwsem);
259 up_write(&policy->rwsem);
261 cpufreq_cpu_put(policy);
265 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
266 * @cpu: CPU to find the policy for.
268 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
269 * if the policy returned by it is not NULL, acquire its rwsem for writing.
270 * Return the policy if it is active or release it and return NULL otherwise.
272 * The policy returned by this function has to be released with the help of
273 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
276 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
278 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
283 down_write(&policy->rwsem);
285 if (policy_is_inactive(policy)) {
286 cpufreq_cpu_release(policy);
293 /*********************************************************************
294 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
295 *********************************************************************/
298 * adjust_jiffies - adjust the system "loops_per_jiffy"
300 * This function alters the system "loops_per_jiffy" for the clock
301 * speed change. Note that loops_per_jiffy cannot be updated on SMP
302 * systems as each CPU might be scaled differently. So, use the arch
303 * per-CPU loops_per_jiffy value wherever possible.
305 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
308 static unsigned long l_p_j_ref;
309 static unsigned int l_p_j_ref_freq;
311 if (ci->flags & CPUFREQ_CONST_LOOPS)
314 if (!l_p_j_ref_freq) {
315 l_p_j_ref = loops_per_jiffy;
316 l_p_j_ref_freq = ci->old;
317 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
318 l_p_j_ref, l_p_j_ref_freq);
320 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
321 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
323 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
324 loops_per_jiffy, ci->new);
330 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
331 * @policy: cpufreq policy whose CPUs are changing frequency.
332 * @freqs: details of the frequency update.
333 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
335 * This function calls the transition notifiers and the "adjust_jiffies"
336 * function. It is called twice on all CPU frequency changes that have external effects.
339 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
340 struct cpufreq_freqs *freqs,
343 BUG_ON(irqs_disabled());
345 if (cpufreq_disabled())
348 freqs->flags = cpufreq_driver->flags;
349 pr_debug("notification %u of frequency transition to %u kHz\n",
353 case CPUFREQ_PRECHANGE:
355 * Detect if the driver reported a value as "old frequency"
356 * which is not equal to what the cpufreq core thinks is "old frequency".
359 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
360 if (policy->cur && (policy->cur != freqs->old)) {
361 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
362 freqs->old, policy->cur);
363 freqs->old = policy->cur;
367 for_each_cpu(freqs->cpu, policy->cpus) {
368 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
369 CPUFREQ_PRECHANGE, freqs);
372 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
375 case CPUFREQ_POSTCHANGE:
376 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
377 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
378 cpumask_pr_args(policy->cpus));
380 for_each_cpu(freqs->cpu, policy->cpus) {
381 trace_cpu_frequency(freqs->new, freqs->cpu);
382 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
383 CPUFREQ_POSTCHANGE, freqs);
386 cpufreq_stats_record_transition(policy, freqs->new);
387 policy->cur = freqs->new;
391 /* Do post notifications when there is a chance that the transition has failed */
392 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
393 struct cpufreq_freqs *freqs, int transition_failed)
395 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
396 if (!transition_failed)
399 swap(freqs->old, freqs->new);
400 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
401 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
404 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
405 struct cpufreq_freqs *freqs)
409 * Catch double invocations of _begin() which lead to self-deadlock.
410 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
411 * doesn't invoke _begin() on their behalf, and hence the chances of
412 * double invocations are very low. Moreover, there are scenarios
413 * where these checks can emit false-positive warnings in these
414 * drivers; so we avoid that by skipping them altogether.
416 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
417 && current == policy->transition_task);
420 wait_event(policy->transition_wait, !policy->transition_ongoing);
422 spin_lock(&policy->transition_lock);
424 if (unlikely(policy->transition_ongoing)) {
425 spin_unlock(&policy->transition_lock);
429 policy->transition_ongoing = true;
430 policy->transition_task = current;
432 spin_unlock(&policy->transition_lock);
434 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
436 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
438 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
439 struct cpufreq_freqs *freqs, int transition_failed)
441 if (WARN_ON(!policy->transition_ongoing))
444 cpufreq_notify_post_transition(policy, freqs, transition_failed);
446 policy->transition_ongoing = false;
447 policy->transition_task = NULL;
449 wake_up(&policy->transition_wait);
451 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
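/*
 * Example (illustrative sketch): a driver that implements ->target() (rather
 * than ->target_index(), which the core wraps itself) is expected to bracket
 * its hardware programming with cpufreq_freq_transition_begin()/_end(). The
 * hardware hook below is a hypothetical stand-in.
 */
static int example_hw_set_frequency(unsigned int cpu, unsigned int khz)
{
	return 0;	/* pretend the hardware accepted the request */
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = target_freq,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = example_hw_set_frequency(policy->cpu, freqs.new);
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}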
454 * Fast frequency switching status count. Positive means "enabled", negative
455 * means "disabled" and 0 means "not decided yet".
457 static int cpufreq_fast_switch_count;
458 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
460 static void cpufreq_list_transition_notifiers(void)
462 struct notifier_block *nb;
464 pr_info("Registered transition notifiers:\n");
466 mutex_lock(&cpufreq_transition_notifier_list.mutex);
468 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
469 pr_info("%pS\n", nb->notifier_call);
471 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
475 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
476 * @policy: cpufreq policy to enable fast frequency switching for.
478 * Try to enable fast frequency switching for @policy.
480 * The attempt will fail if there is at least one transition notifier registered
481 * at this point, as fast frequency switching is quite fundamentally at odds
482 * with transition notifiers. Thus if successful, it will make registration of
483 * transition notifiers fail going forward.
485 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
487 lockdep_assert_held(&policy->rwsem);
489 if (!policy->fast_switch_possible)
492 mutex_lock(&cpufreq_fast_switch_lock);
493 if (cpufreq_fast_switch_count >= 0) {
494 cpufreq_fast_switch_count++;
495 policy->fast_switch_enabled = true;
497 pr_warn("CPU%u: Fast frequency switching not enabled\n",
499 cpufreq_list_transition_notifiers();
501 mutex_unlock(&cpufreq_fast_switch_lock);
503 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
506 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
507 * @policy: cpufreq policy to disable fast frequency switching for.
509 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
511 mutex_lock(&cpufreq_fast_switch_lock);
512 if (policy->fast_switch_enabled) {
513 policy->fast_switch_enabled = false;
514 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
515 cpufreq_fast_switch_count--;
517 mutex_unlock(&cpufreq_fast_switch_lock);
519 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
522 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported one.
524 * @target_freq: target frequency to resolve.
526 * The target to driver frequency mapping is cached in the policy.
528 * Return: Lowest driver-supported frequency greater than or equal to the
529 * given target_freq, subject to policy (min/max) and driver limitations.
531 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
532 unsigned int target_freq)
534 target_freq = clamp_val(target_freq, policy->min, policy->max);
535 policy->cached_target_freq = target_freq;
537 if (cpufreq_driver->target_index) {
540 idx = cpufreq_frequency_table_target(policy, target_freq,
542 policy->cached_resolved_idx = idx;
543 return policy->freq_table[idx].frequency;
546 if (cpufreq_driver->resolve_freq)
547 return cpufreq_driver->resolve_freq(policy, target_freq);
551 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
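/*
 * Example (illustrative sketch): how a governor's frequency-selection path
 * might use cpufreq_driver_resolve_freq() to map a raw request onto something
 * the driver can actually program. The linear 0-100% heuristic is made up.
 */
static unsigned int example_pick_frequency(struct cpufreq_policy *policy,
					   unsigned int load_percent)
{
	unsigned int raw = policy->min +
		(policy->max - policy->min) * load_percent / 100;

	/* Clamps to policy->min/max and snaps to a table entry if one exists. */
	return cpufreq_driver_resolve_freq(policy, raw);
}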
553 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
555 unsigned int latency;
557 if (policy->transition_delay_us)
558 return policy->transition_delay_us;
560 latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
563 * For platforms that can change the frequency very fast (< 10
564 * us), the above formula gives a decent transition delay. But
565 * for platforms where transition_latency is in milliseconds, it
566 * ends up giving unrealistic values.
568 * Cap the default transition delay to 10 ms, which seems to be
569 * a reasonable amount of time after which we should reevaluate the frequency.
572 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
575 return LATENCY_MULTIPLIER;
577 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
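/*
 * Worked example (illustrative, with LATENCY_MULTIPLIER taken as 1000 per
 * include/linux/cpufreq.h): for a hypothetical platform whose
 * transition_latency is 500000 ns,
 *
 *   latency = 500000 ns / NSEC_PER_USEC = 500 us
 *   latency * LATENCY_MULTIPLIER       = 500000 us (500 ms)
 *   capped at 10000 us, so the default transition delay becomes 10 ms.
 */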
579 /*********************************************************************
581 *********************************************************************/
582 static ssize_t show_boost(struct kobject *kobj,
583 struct kobj_attribute *attr, char *buf)
585 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
588 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
589 const char *buf, size_t count)
593 ret = sscanf(buf, "%d", &enable);
594 if (ret != 1 || enable < 0 || enable > 1)
597 if (cpufreq_boost_trigger_state(enable)) {
598 pr_err("%s: Cannot %s BOOST!\n",
599 __func__, enable ? "enable" : "disable");
603 pr_debug("%s: cpufreq BOOST %s\n",
604 __func__, enable ? "enabled" : "disabled");
608 define_one_global_rw(boost);
610 static struct cpufreq_governor *find_governor(const char *str_governor)
612 struct cpufreq_governor *t;
615 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
622 * cpufreq_parse_governor - parse a governor string
624 static int cpufreq_parse_governor(char *str_governor,
625 struct cpufreq_policy *policy)
627 if (cpufreq_driver->setpolicy) {
628 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
629 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
633 if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
634 policy->policy = CPUFREQ_POLICY_POWERSAVE;
638 struct cpufreq_governor *t;
640 mutex_lock(&cpufreq_governor_mutex);
642 t = find_governor(str_governor);
646 mutex_unlock(&cpufreq_governor_mutex);
648 ret = request_module("cpufreq_%s", str_governor);
652 mutex_lock(&cpufreq_governor_mutex);
654 t = find_governor(str_governor);
656 if (t && !try_module_get(t->owner))
659 mutex_unlock(&cpufreq_governor_mutex);
662 policy->governor = t;
671 * cpufreq_per_cpu_attr_read() / show_##file_name() -
672 * print out cpufreq information
674 * Write out information from cpufreq_driver->policy[cpu]; object must be
678 #define show_one(file_name, object) \
679 static ssize_t show_##file_name \
680 (struct cpufreq_policy *policy, char *buf) \
682 return sprintf(buf, "%u\n", policy->object); \
685 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
686 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
687 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
688 show_one(scaling_min_freq, min);
689 show_one(scaling_max_freq, max);
691 __weak unsigned int arch_freq_get_on_cpu(int cpu)
696 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
701 freq = arch_freq_get_on_cpu(policy->cpu);
703 ret = sprintf(buf, "%u\n", freq);
704 else if (cpufreq_driver && cpufreq_driver->setpolicy &&
706 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
708 ret = sprintf(buf, "%u\n", policy->cur);
713 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
715 #define store_one(file_name, object) \
716 static ssize_t store_##file_name \
717 (struct cpufreq_policy *policy, const char *buf, size_t count) \
720 struct cpufreq_policy new_policy; \
722 memcpy(&new_policy, policy, sizeof(*policy)); \
723 new_policy.min = policy->user_policy.min; \
724 new_policy.max = policy->user_policy.max; \
726 ret = sscanf(buf, "%u", &new_policy.object); \
730 temp = new_policy.object; \
731 ret = cpufreq_set_policy(policy, &new_policy); \
733 policy->user_policy.object = temp; \
735 return ret ? ret : count; \
738 store_one(scaling_min_freq, min);
739 store_one(scaling_max_freq, max);
742 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
744 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
747 unsigned int cur_freq = __cpufreq_get(policy);
750 return sprintf(buf, "%u\n", cur_freq);
752 return sprintf(buf, "<unknown>\n");
756 * show_scaling_governor - show the current policy for the specified CPU
758 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
760 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
761 return sprintf(buf, "powersave\n");
762 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
763 return sprintf(buf, "performance\n");
764 else if (policy->governor)
765 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
766 policy->governor->name);
771 * store_scaling_governor - store policy for the specified CPU
773 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
774 const char *buf, size_t count)
777 char str_governor[16];
778 struct cpufreq_policy new_policy;
780 memcpy(&new_policy, policy, sizeof(*policy));
782 ret = sscanf(buf, "%15s", str_governor);
786 if (cpufreq_parse_governor(str_governor, &new_policy))
789 ret = cpufreq_set_policy(policy, &new_policy);
791 if (new_policy.governor)
792 module_put(new_policy.governor->owner);
794 return ret ? ret : count;
798 * show_scaling_driver - show the cpufreq driver currently loaded
800 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
802 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
806 * show_scaling_available_governors - show the available CPUfreq governors
808 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
812 struct cpufreq_governor *t;
815 i += sprintf(buf, "performance powersave");
819 for_each_governor(t) {
820 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
821 - (CPUFREQ_NAME_LEN + 2)))
823 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
826 i += sprintf(&buf[i], "\n");
830 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
835 for_each_cpu(cpu, mask) {
837 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
838 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
839 if (i >= (PAGE_SIZE - 5))
842 i += sprintf(&buf[i], "\n");
845 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
848 * show_related_cpus - show the CPUs affected by each transition even if
849 * hw coordination is in use
851 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
853 return cpufreq_show_cpus(policy->related_cpus, buf);
857 * show_affected_cpus - show the CPUs affected by each transition
859 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
861 return cpufreq_show_cpus(policy->cpus, buf);
864 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
865 const char *buf, size_t count)
867 unsigned int freq = 0;
870 if (!policy->governor || !policy->governor->store_setspeed)
873 ret = sscanf(buf, "%u", &freq);
877 policy->governor->store_setspeed(policy, freq);
882 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
884 if (!policy->governor || !policy->governor->show_setspeed)
885 return sprintf(buf, "<unsupported>\n");
887 return policy->governor->show_setspeed(policy, buf);
891 * show_bios_limit - show the current cpufreq HW/BIOS limitation
893 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
897 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
899 return sprintf(buf, "%u\n", limit);
900 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
903 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
904 cpufreq_freq_attr_ro(cpuinfo_min_freq);
905 cpufreq_freq_attr_ro(cpuinfo_max_freq);
906 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
907 cpufreq_freq_attr_ro(scaling_available_governors);
908 cpufreq_freq_attr_ro(scaling_driver);
909 cpufreq_freq_attr_ro(scaling_cur_freq);
910 cpufreq_freq_attr_ro(bios_limit);
911 cpufreq_freq_attr_ro(related_cpus);
912 cpufreq_freq_attr_ro(affected_cpus);
913 cpufreq_freq_attr_rw(scaling_min_freq);
914 cpufreq_freq_attr_rw(scaling_max_freq);
915 cpufreq_freq_attr_rw(scaling_governor);
916 cpufreq_freq_attr_rw(scaling_setspeed);
918 static struct attribute *default_attrs[] = {
919 &cpuinfo_min_freq.attr,
920 &cpuinfo_max_freq.attr,
921 &cpuinfo_transition_latency.attr,
922 &scaling_min_freq.attr,
923 &scaling_max_freq.attr,
926 &scaling_governor.attr,
927 &scaling_driver.attr,
928 &scaling_available_governors.attr,
929 &scaling_setspeed.attr,
933 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
934 #define to_attr(a) container_of(a, struct freq_attr, attr)
936 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
938 struct cpufreq_policy *policy = to_policy(kobj);
939 struct freq_attr *fattr = to_attr(attr);
942 down_read(&policy->rwsem);
943 ret = fattr->show(policy, buf);
944 up_read(&policy->rwsem);
949 static ssize_t store(struct kobject *kobj, struct attribute *attr,
950 const char *buf, size_t count)
952 struct cpufreq_policy *policy = to_policy(kobj);
953 struct freq_attr *fattr = to_attr(attr);
954 ssize_t ret = -EINVAL;
957 * cpus_read_trylock() is used here to work around a circular lock
958 * dependency problem with respect to cpufreq_register_driver().
960 if (!cpus_read_trylock())
963 if (cpu_online(policy->cpu)) {
964 down_write(&policy->rwsem);
965 ret = fattr->store(policy, buf, count);
966 up_write(&policy->rwsem);
974 static void cpufreq_sysfs_release(struct kobject *kobj)
976 struct cpufreq_policy *policy = to_policy(kobj);
977 pr_debug("last reference is dropped\n");
978 complete(&policy->kobj_unregister);
981 static const struct sysfs_ops sysfs_ops = {
986 static struct kobj_type ktype_cpufreq = {
987 .sysfs_ops = &sysfs_ops,
988 .default_attrs = default_attrs,
989 .release = cpufreq_sysfs_release,
992 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
994 struct device *dev = get_cpu_device(cpu);
999 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1002 dev_dbg(dev, "%s: Adding symlink\n", __func__);
1003 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1004 dev_err(dev, "cpufreq symlink creation failed\n");
1007 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1010 dev_dbg(dev, "%s: Removing symlink\n", __func__);
1011 sysfs_remove_link(&dev->kobj, "cpufreq");
1014 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1016 struct freq_attr **drv_attr;
1019 /* set up files for this cpu device */
1020 drv_attr = cpufreq_driver->attr;
1021 while (drv_attr && *drv_attr) {
1022 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1027 if (cpufreq_driver->get) {
1028 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1033 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1037 if (cpufreq_driver->bios_limit) {
1038 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1046 __weak struct cpufreq_governor *cpufreq_default_governor(void)
1051 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1053 struct cpufreq_governor *gov = NULL;
1054 struct cpufreq_policy new_policy;
1056 memcpy(&new_policy, policy, sizeof(*policy));
1058 /* Update governor of new_policy to the governor used before hotplug */
1059 gov = find_governor(policy->last_governor);
1061 pr_debug("Restoring governor %s for cpu %d\n",
1062 policy->governor->name, policy->cpu);
1064 gov = cpufreq_default_governor();
1069 new_policy.governor = gov;
1071 /* Use the default policy if there is no last_policy. */
1072 if (cpufreq_driver->setpolicy) {
1073 if (policy->last_policy)
1074 new_policy.policy = policy->last_policy;
1076 cpufreq_parse_governor(gov->name, &new_policy);
1078 /* set default policy */
1079 return cpufreq_set_policy(policy, &new_policy);
1082 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1086 /* Has this CPU been taken care of already? */
1087 if (cpumask_test_cpu(cpu, policy->cpus))
1090 down_write(&policy->rwsem);
1092 cpufreq_stop_governor(policy);
1094 cpumask_set_cpu(cpu, policy->cpus);
1097 ret = cpufreq_start_governor(policy);
1099 pr_err("%s: Failed to start governor\n", __func__);
1101 up_write(&policy->rwsem);
1105 static void handle_update(struct work_struct *work)
1107 struct cpufreq_policy *policy =
1108 container_of(work, struct cpufreq_policy, update);
1109 unsigned int cpu = policy->cpu;
1110 pr_debug("handle_update for cpu %u called\n", cpu);
1111 cpufreq_update_policy(cpu);
1114 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1116 struct cpufreq_policy *policy;
1119 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1123 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1124 goto err_free_policy;
1126 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1127 goto err_free_cpumask;
1129 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1130 goto err_free_rcpumask;
1132 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1133 cpufreq_global_kobject, "policy%u", cpu);
1135 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1136 kobject_put(&policy->kobj);
1137 goto err_free_real_cpus;
1140 INIT_LIST_HEAD(&policy->policy_list);
1141 init_rwsem(&policy->rwsem);
1142 spin_lock_init(&policy->transition_lock);
1143 init_waitqueue_head(&policy->transition_wait);
1144 init_completion(&policy->kobj_unregister);
1145 INIT_WORK(&policy->update, handle_update);
1151 free_cpumask_var(policy->real_cpus);
1153 free_cpumask_var(policy->related_cpus);
1155 free_cpumask_var(policy->cpus);
1162 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1164 struct kobject *kobj;
1165 struct completion *cmp;
1167 down_write(&policy->rwsem);
1168 cpufreq_stats_free_table(policy);
1169 kobj = &policy->kobj;
1170 cmp = &policy->kobj_unregister;
1171 up_write(&policy->rwsem);
1175 * We need to make sure that the underlying kobj is
1176 * actually not referenced anymore by anybody before we
1177 * proceed with unloading.
1179 pr_debug("waiting for dropping of refcount\n");
1180 wait_for_completion(cmp);
1181 pr_debug("wait complete\n");
1184 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1186 unsigned long flags;
1189 /* Remove policy from list */
1190 write_lock_irqsave(&cpufreq_driver_lock, flags);
1191 list_del(&policy->policy_list);
1193 for_each_cpu(cpu, policy->related_cpus)
1194 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1195 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1197 cpufreq_policy_put_kobj(policy);
1198 free_cpumask_var(policy->real_cpus);
1199 free_cpumask_var(policy->related_cpus);
1200 free_cpumask_var(policy->cpus);
1204 static int cpufreq_online(unsigned int cpu)
1206 struct cpufreq_policy *policy;
1208 unsigned long flags;
1212 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1214 /* Check if this CPU already has a policy to manage it */
1215 policy = per_cpu(cpufreq_cpu_data, cpu);
1217 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1218 if (!policy_is_inactive(policy))
1219 return cpufreq_add_policy_cpu(policy, cpu);
1221 /* This is the only online CPU for the policy. Start over. */
1223 down_write(&policy->rwsem);
1225 policy->governor = NULL;
1226 up_write(&policy->rwsem);
1229 policy = cpufreq_policy_alloc(cpu);
1234 if (!new_policy && cpufreq_driver->online) {
1235 ret = cpufreq_driver->online(policy);
1237 pr_debug("%s: %d: initialization failed\n", __func__,
1239 goto out_exit_policy;
1242 /* Recover policy->cpus using related_cpus */
1243 cpumask_copy(policy->cpus, policy->related_cpus);
1245 cpumask_copy(policy->cpus, cpumask_of(cpu));
1248 * Call the driver. From then on the cpufreq driver must be able
1249 * to accept all calls to ->verify and ->setpolicy for this CPU.
1251 ret = cpufreq_driver->init(policy);
1253 pr_debug("%s: %d: initialization failed\n", __func__,
1255 goto out_free_policy;
1258 ret = cpufreq_table_validate_and_sort(policy);
1260 goto out_exit_policy;
1262 /* related_cpus should at least include policy->cpus. */
1263 cpumask_copy(policy->related_cpus, policy->cpus);
1266 down_write(&policy->rwsem);
1268 * The affected cpus must always be the ones that are online. We aren't
1269 * managing offline cpus here.
1271 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1274 policy->user_policy.min = policy->min;
1275 policy->user_policy.max = policy->max;
1277 for_each_cpu(j, policy->related_cpus) {
1278 per_cpu(cpufreq_cpu_data, j) = policy;
1279 add_cpu_dev_symlink(policy, j);
1282 policy->min = policy->user_policy.min;
1283 policy->max = policy->user_policy.max;
1286 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1287 policy->cur = cpufreq_driver->get(policy->cpu);
1289 pr_err("%s: ->get() failed\n", __func__);
1290 goto out_destroy_policy;
1295 * Sometimes boot loaders set the CPU frequency to a value outside of
1296 * the frequency table present with the cpufreq core. In such cases the CPU
1297 * might be unstable if it has to run on that frequency for a long duration,
1298 * so it is better to set it to a frequency which is specified in the
1299 * freq-table. This also makes cpufreq stats inconsistent, as
1300 * cpufreq-stats would fail to register because the current frequency of the
1301 * CPU isn't found in the freq-table.
1303 * Because we don't want this change to affect the boot process badly, we go
1304 * for the next freq which is >= policy->cur ('cur' must be set by now,
1305 * otherwise we will end up setting the freq to the lowest entry of the table
1306 * as 'cur' is initialized to zero).
1308 * We are passing target-freq as "policy->cur - 1", otherwise
1309 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1310 * equal to target-freq.
1312 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1314 /* Are we running at unknown frequency ? */
1315 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1316 if (ret == -EINVAL) {
1317 /* Warn user and fix it */
1318 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1319 __func__, policy->cpu, policy->cur);
1320 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1321 CPUFREQ_RELATION_L);
1324 * Reaching here within a few seconds after boot may not
1325 * mean that the system will remain stable at the "unknown"
1326 * frequency for a longer duration. Hence, a BUG_ON().
1329 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1330 __func__, policy->cpu, policy->cur);
1335 ret = cpufreq_add_dev_interface(policy);
1337 goto out_destroy_policy;
1339 cpufreq_stats_create_table(policy);
1341 write_lock_irqsave(&cpufreq_driver_lock, flags);
1342 list_add(&policy->policy_list, &cpufreq_policy_list);
1343 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1346 ret = cpufreq_init_policy(policy);
1348 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1349 __func__, cpu, ret);
1350 goto out_destroy_policy;
1353 up_write(&policy->rwsem);
1355 kobject_uevent(&policy->kobj, KOBJ_ADD);
1357 /* Callback for handling stuff after policy is ready */
1358 if (cpufreq_driver->ready)
1359 cpufreq_driver->ready(policy);
1361 if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
1362 cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
1363 policy->cdev = of_cpufreq_cooling_register(policy);
1365 pr_debug("initialization complete\n");
1370 for_each_cpu(j, policy->real_cpus)
1371 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1373 up_write(&policy->rwsem);
1376 if (cpufreq_driver->exit)
1377 cpufreq_driver->exit(policy);
1380 cpufreq_policy_free(policy);
1385 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1387 * @sif: Subsystem interface structure pointer (not used)
1389 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1391 struct cpufreq_policy *policy;
1392 unsigned cpu = dev->id;
1395 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1397 if (cpu_online(cpu)) {
1398 ret = cpufreq_online(cpu);
1403 /* Create sysfs link on CPU registration */
1404 policy = per_cpu(cpufreq_cpu_data, cpu);
1406 add_cpu_dev_symlink(policy, cpu);
1411 static int cpufreq_offline(unsigned int cpu)
1413 struct cpufreq_policy *policy;
1416 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1418 policy = cpufreq_cpu_get_raw(cpu);
1420 pr_debug("%s: No cpu_data found\n", __func__);
1424 down_write(&policy->rwsem);
1426 cpufreq_stop_governor(policy);
1428 cpumask_clear_cpu(cpu, policy->cpus);
1430 if (policy_is_inactive(policy)) {
1432 strncpy(policy->last_governor, policy->governor->name,
1435 policy->last_policy = policy->policy;
1436 } else if (cpu == policy->cpu) {
1437 /* Nominate new CPU */
1438 policy->cpu = cpumask_any(policy->cpus);
1441 /* Start governor again for active policy */
1442 if (!policy_is_inactive(policy)) {
1444 ret = cpufreq_start_governor(policy);
1446 pr_err("%s: Failed to start governor\n", __func__);
1452 if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
1453 cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
1454 cpufreq_cooling_unregister(policy->cdev);
1455 policy->cdev = NULL;
1458 if (cpufreq_driver->stop_cpu)
1459 cpufreq_driver->stop_cpu(policy);
1462 cpufreq_exit_governor(policy);
1465 * Perform the ->offline() during light-weight tear-down, as
1466 * that allows fast recovery when the CPU comes back.
1468 if (cpufreq_driver->offline) {
1469 cpufreq_driver->offline(policy);
1470 } else if (cpufreq_driver->exit) {
1471 cpufreq_driver->exit(policy);
1472 policy->freq_table = NULL;
1476 up_write(&policy->rwsem);
1481 * cpufreq_remove_dev - remove a CPU device
1483 * Removes the cpufreq interface for a CPU device.
1485 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1487 unsigned int cpu = dev->id;
1488 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1493 if (cpu_online(cpu))
1494 cpufreq_offline(cpu);
1496 cpumask_clear_cpu(cpu, policy->real_cpus);
1497 remove_cpu_dev_symlink(policy, dev);
1499 if (cpumask_empty(policy->real_cpus)) {
1500 /* We did light-weight exit earlier, do full tear down now */
1501 if (cpufreq_driver->offline)
1502 cpufreq_driver->exit(policy);
1504 cpufreq_policy_free(policy);
1509 * cpufreq_out_of_sync - If the actual and saved CPU frequency differ, we're in deep trouble.
1511 * @policy: policy managing CPUs
1512 * @new_freq: CPU frequency the CPU actually runs at
1514 * We adjust to the current frequency first, and need to clean up later.
1515 * So either call cpufreq_update_policy() or schedule handle_update().
1517 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1518 unsigned int new_freq)
1520 struct cpufreq_freqs freqs;
1522 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1523 policy->cur, new_freq);
1525 freqs.old = policy->cur;
1526 freqs.new = new_freq;
1528 cpufreq_freq_transition_begin(policy, &freqs);
1529 cpufreq_freq_transition_end(policy, &freqs, 0);
1533 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1536 * This is the last known freq, without actually getting it from the driver.
1537 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1539 unsigned int cpufreq_quick_get(unsigned int cpu)
1541 struct cpufreq_policy *policy;
1542 unsigned int ret_freq = 0;
1543 unsigned long flags;
1545 read_lock_irqsave(&cpufreq_driver_lock, flags);
1547 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1548 ret_freq = cpufreq_driver->get(cpu);
1549 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1553 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1555 policy = cpufreq_cpu_get(cpu);
1557 ret_freq = policy->cur;
1558 cpufreq_cpu_put(policy);
1563 EXPORT_SYMBOL(cpufreq_quick_get);
1566 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1569 * Just return the max possible frequency for a given CPU.
1571 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1573 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1574 unsigned int ret_freq = 0;
1577 ret_freq = policy->max;
1578 cpufreq_cpu_put(policy);
1583 EXPORT_SYMBOL(cpufreq_quick_get_max);
1585 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1587 unsigned int ret_freq = 0;
1589 if (unlikely(policy_is_inactive(policy)))
1592 ret_freq = cpufreq_driver->get(policy->cpu);
1595 * If fast frequency switching is used with the given policy, the check
1596 * against policy->cur is pointless, so skip it in that case too.
1598 if (policy->fast_switch_enabled)
1601 if (ret_freq && policy->cur &&
1602 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1603 /* verify no discrepancy between actual and
1604 saved value exists */
1605 if (unlikely(ret_freq != policy->cur)) {
1606 cpufreq_out_of_sync(policy, ret_freq);
1607 schedule_work(&policy->update);
1615 * cpufreq_get - get the current CPU frequency (in kHz)
1618 * Get the current frequency of the given CPU.
1620 unsigned int cpufreq_get(unsigned int cpu)
1622 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1623 unsigned int ret_freq = 0;
1626 down_read(&policy->rwsem);
1627 if (cpufreq_driver->get)
1628 ret_freq = __cpufreq_get(policy);
1629 up_read(&policy->rwsem);
1631 cpufreq_cpu_put(policy);
1636 EXPORT_SYMBOL(cpufreq_get);
1638 static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1640 unsigned int new_freq;
1642 new_freq = cpufreq_driver->get(policy->cpu);
1647 pr_debug("cpufreq: Driver did not initialize current freq\n");
1648 policy->cur = new_freq;
1649 } else if (policy->cur != new_freq && has_target()) {
1650 cpufreq_out_of_sync(policy, new_freq);
1656 static struct subsys_interface cpufreq_interface = {
1658 .subsys = &cpu_subsys,
1659 .add_dev = cpufreq_add_dev,
1660 .remove_dev = cpufreq_remove_dev,
1664 * In case the platform wants some specific frequency to be configured during suspend.
1667 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1671 if (!policy->suspend_freq) {
1672 pr_debug("%s: suspend_freq not defined\n", __func__);
1676 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1677 policy->suspend_freq);
1679 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1680 CPUFREQ_RELATION_H);
1682 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1683 __func__, policy->suspend_freq, ret);
1687 EXPORT_SYMBOL(cpufreq_generic_suspend);
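/*
 * Example (illustrative sketch): a driver that wants a fixed frequency across
 * suspend can set policy->suspend_freq in its ->init() and reuse
 * cpufreq_generic_suspend() as its ->suspend() callback. All names other than
 * the cpufreq core symbols (including example_freq_table from the earlier
 * sketch) are hypothetical, and other mandatory callbacks are omitted.
 */
static int example_init_with_suspend_freq(struct cpufreq_policy *policy)
{
	int ret = cpufreq_generic_init(policy, example_freq_table, 100000);

	if (!ret)
		policy->suspend_freq = 500000;	/* hypothetical safe freq, kHz */

	return ret;
}

static struct cpufreq_driver example_driver_with_suspend = {
	.name		= "example",
	.init		= example_init_with_suspend_freq,
	.suspend	= cpufreq_generic_suspend,
	/* .verify, .target_index, etc. omitted in this sketch */
};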
1690 * cpufreq_suspend() - Suspend CPUFreq governors
1692 * Called during system wide Suspend/Hibernate cycles for suspending governors,
1693 * as some platforms can't change frequency after this point in the suspend
1694 * cycle, because some of the devices (e.g. i2c, regulators) they use for
1695 * changing frequency are suspended quickly after this point.
1697 void cpufreq_suspend(void)
1699 struct cpufreq_policy *policy;
1701 if (!cpufreq_driver)
1704 if (!has_target() && !cpufreq_driver->suspend)
1707 pr_debug("%s: Suspending Governors\n", __func__);
1709 for_each_active_policy(policy) {
1711 down_write(&policy->rwsem);
1712 cpufreq_stop_governor(policy);
1713 up_write(&policy->rwsem);
1716 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1717 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1722 cpufreq_suspended = true;
1726 * cpufreq_resume() - Resume CPUFreq governors
1728 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1729 * are suspended with cpufreq_suspend().
1731 void cpufreq_resume(void)
1733 struct cpufreq_policy *policy;
1736 if (!cpufreq_driver)
1739 if (unlikely(!cpufreq_suspended))
1742 cpufreq_suspended = false;
1744 if (!has_target() && !cpufreq_driver->resume)
1747 pr_debug("%s: Resuming Governors\n", __func__);
1749 for_each_active_policy(policy) {
1750 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1751 pr_err("%s: Failed to resume driver: %p\n", __func__,
1753 } else if (has_target()) {
1754 down_write(&policy->rwsem);
1755 ret = cpufreq_start_governor(policy);
1756 up_write(&policy->rwsem);
1759 pr_err("%s: Failed to start governor for policy: %p\n",
1766 * cpufreq_get_current_driver - return current driver's name
1768 * Return the name string of the currently loaded cpufreq driver, or NULL if none.
1771 const char *cpufreq_get_current_driver(void)
1774 return cpufreq_driver->name;
1778 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1781 * cpufreq_get_driver_data - return current driver data
1783 * Return the private data of the currently loaded cpufreq
1784 * driver, or NULL if no cpufreq driver is loaded.
1786 void *cpufreq_get_driver_data(void)
1789 return cpufreq_driver->driver_data;
1793 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1795 /*********************************************************************
1796 * NOTIFIER LISTS INTERFACE *
1797 *********************************************************************/
1800 * cpufreq_register_notifier - register a driver with cpufreq
1801 * @nb: notifier function to register
1802 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1804 * Add a driver to one of two lists: either a list of drivers that
1805 * are notified about clock rate changes (once before and once after
1806 * the transition), or a list of drivers that are notified about
1807 * changes in cpufreq policy.
1809 * This function may sleep, and has the same return conditions as
1810 * blocking_notifier_chain_register.
1812 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1816 if (cpufreq_disabled())
1820 case CPUFREQ_TRANSITION_NOTIFIER:
1821 mutex_lock(&cpufreq_fast_switch_lock);
1823 if (cpufreq_fast_switch_count > 0) {
1824 mutex_unlock(&cpufreq_fast_switch_lock);
1827 ret = srcu_notifier_chain_register(
1828 &cpufreq_transition_notifier_list, nb);
1830 cpufreq_fast_switch_count--;
1832 mutex_unlock(&cpufreq_fast_switch_lock);
1834 case CPUFREQ_POLICY_NOTIFIER:
1835 ret = blocking_notifier_chain_register(
1836 &cpufreq_policy_notifier_list, nb);
1844 EXPORT_SYMBOL(cpufreq_register_notifier);
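/*
 * Example (illustrative sketch): registering a transition notifier. The
 * callback runs once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE
 * around every frequency change; the names below are hypothetical.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (action == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u now at %u kHz\n", freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* Typically done from a module init path:
 * cpufreq_register_notifier(&example_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */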
1847 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1848 * @nb: notifier block to be unregistered
1849 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1851 * Remove a driver from the CPU frequency notifier list.
1853 * This function may sleep, and has the same return conditions as
1854 * blocking_notifier_chain_unregister.
1856 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1860 if (cpufreq_disabled())
1864 case CPUFREQ_TRANSITION_NOTIFIER:
1865 mutex_lock(&cpufreq_fast_switch_lock);
1867 ret = srcu_notifier_chain_unregister(
1868 &cpufreq_transition_notifier_list, nb);
1869 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1870 cpufreq_fast_switch_count++;
1872 mutex_unlock(&cpufreq_fast_switch_lock);
1874 case CPUFREQ_POLICY_NOTIFIER:
1875 ret = blocking_notifier_chain_unregister(
1876 &cpufreq_policy_notifier_list, nb);
1884 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1887 /*********************************************************************
1889 *********************************************************************/
1892 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
1893 * @policy: cpufreq policy to switch the frequency for.
1894 * @target_freq: New frequency to set (may be approximate).
1896 * Carry out a fast frequency switch without sleeping.
1898 * The driver's ->fast_switch() callback invoked by this function must be
1899 * suitable for being called from within RCU-sched read-side critical sections
1900 * and it is expected to select the minimum available frequency greater than or
1901 * equal to @target_freq (CPUFREQ_RELATION_L).
1903 * This function must not be called if policy->fast_switch_enabled is unset.
1905 * Governors calling this function must guarantee that it will never be invoked
1906 * twice in parallel for the same policy and that it will never be called in
1907 * parallel with either ->target() or ->target_index() for the same policy.
1909 * Returns the actual frequency set for the CPU.
1911 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
1912 * error condition, the hardware configuration must be preserved.
1914 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
1915 unsigned int target_freq)
1917 target_freq = clamp_val(target_freq, policy->min, policy->max);
1919 return cpufreq_driver->fast_switch(policy, target_freq);
1921 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
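/*
 * Example (illustrative sketch): the calling pattern a fast-switch-capable
 * governor uses from its scheduler update hook. It may only do this when
 * policy->fast_switch_enabled is set; the helper name is hypothetical.
 */
static void example_fast_adjust(struct cpufreq_policy *policy,
				unsigned int target_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;

	/* Non-sleeping; may be called from scheduler context. */
	freq = cpufreq_driver_fast_switch(policy, target_freq);
	if (freq)
		policy->cur = freq;
}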
1923 /* Must set freqs->new to intermediate frequency */
1924 static int __target_intermediate(struct cpufreq_policy *policy,
1925 struct cpufreq_freqs *freqs, int index)
1929 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1931 /* We don't need to switch to intermediate freq */
1935 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1936 __func__, policy->cpu, freqs->old, freqs->new);
1938 cpufreq_freq_transition_begin(policy, freqs);
1939 ret = cpufreq_driver->target_intermediate(policy, index);
1940 cpufreq_freq_transition_end(policy, freqs, ret);
1943 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1949 static int __target_index(struct cpufreq_policy *policy, int index)
1951 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1952 unsigned int intermediate_freq = 0;
1953 unsigned int newfreq = policy->freq_table[index].frequency;
1954 int retval = -EINVAL;
1957 if (newfreq == policy->cur)
1960 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1962 /* Handle switching to intermediate frequency */
1963 if (cpufreq_driver->get_intermediate) {
1964 retval = __target_intermediate(policy, &freqs, index);
1968 intermediate_freq = freqs.new;
1969 /* Set old freq to intermediate */
1970 if (intermediate_freq)
1971 freqs.old = freqs.new;
1974 freqs.new = newfreq;
1975 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1976 __func__, policy->cpu, freqs.old, freqs.new);
1978 cpufreq_freq_transition_begin(policy, &freqs);
1981 retval = cpufreq_driver->target_index(policy, index);
1983 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1987 cpufreq_freq_transition_end(policy, &freqs, retval);
1990 * Failed after setting to intermediate freq? The driver should have
1991 * reverted to the initial frequency, and so should we. Check
1992 * here for intermediate_freq instead of get_intermediate, in
1993 * case we haven't switched to the intermediate freq at all.
1995 if (unlikely(retval && intermediate_freq)) {
1996 freqs.old = intermediate_freq;
1997 freqs.new = policy->restore_freq;
1998 cpufreq_freq_transition_begin(policy, &freqs);
1999 cpufreq_freq_transition_end(policy, &freqs, 0);
2006 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2007 unsigned int target_freq,
2008 unsigned int relation)
2010 unsigned int old_target_freq = target_freq;
2013 if (cpufreq_disabled())
2016 /* Make sure that target_freq is within supported range */
2017 target_freq = clamp_val(target_freq, policy->min, policy->max);
2019 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2020 policy->cpu, target_freq, relation, old_target_freq);
2023 * This might look like a redundant call, as we are checking it again
2024 * after finding the index. But it is left intentionally for cases where
2025 * exactly the same freq is requested again, so we can save a few function calls.
2028 if (target_freq == policy->cur)
2031 /* Save last value to restore later on errors */
2032 policy->restore_freq = policy->cur;
2034 if (cpufreq_driver->target)
2035 return cpufreq_driver->target(policy, target_freq, relation);
2037 if (!cpufreq_driver->target_index)
2040 index = cpufreq_frequency_table_target(policy, target_freq, relation);
2042 return __target_index(policy, index);
2044 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2046 int cpufreq_driver_target(struct cpufreq_policy *policy,
2047 unsigned int target_freq,
2048 unsigned int relation)
2052 down_write(&policy->rwsem);
2054 ret = __cpufreq_driver_target(policy, target_freq, relation);
2056 up_write(&policy->rwsem);
2060 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
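/*
 * Example (illustrative sketch): requesting a frequency change from process
 * context. CPUFREQ_RELATION_L asks for the lowest supported frequency at or
 * above the target; CPUFREQ_RELATION_H for the highest at or below it.
 */
static int example_request_freq(struct cpufreq_policy *policy,
				unsigned int khz)
{
	/* Takes policy->rwsem internally; do not call with it already held. */
	return cpufreq_driver_target(policy, khz, CPUFREQ_RELATION_L);
}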
2062 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2067 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2071 /* Don't start any governor operations if we are entering suspend */
2072 if (cpufreq_suspended)
2075 * The governor might not be initialized here if an ACPI _PPC change
2076 * notification happened, so check it.
2078 if (!policy->governor)
2081 /* Platform doesn't want dynamic frequency switching ? */
2082 if (policy->governor->dynamic_switching &&
2083 cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2084 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2087 pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2088 policy->governor->name, gov->name);
2089 policy->governor = gov;
2095 if (!try_module_get(policy->governor->owner))
2098 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2100 if (policy->governor->init) {
2101 ret = policy->governor->init(policy);
2103 module_put(policy->governor->owner);
2111 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2113 if (cpufreq_suspended || !policy->governor)
2116 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2118 if (policy->governor->exit)
2119 policy->governor->exit(policy);
2121 module_put(policy->governor->owner);
2124 static int cpufreq_start_governor(struct cpufreq_policy *policy)
2128 if (cpufreq_suspended)
2131 if (!policy->governor)
2134 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2136 if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
2137 cpufreq_update_current_freq(policy);
2139 if (policy->governor->start) {
2140 ret = policy->governor->start(policy);
2145 if (policy->governor->limits)
2146 policy->governor->limits(policy);
2151 static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2153 if (cpufreq_suspended || !policy->governor)
2156 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2158 if (policy->governor->stop)
2159 policy->governor->stop(policy);
2162 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2164 if (cpufreq_suspended || !policy->governor)
2167 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2169 if (policy->governor->limits)
2170 policy->governor->limits(policy);
2173 int cpufreq_register_governor(struct cpufreq_governor *governor)
2180 if (cpufreq_disabled())
2183 mutex_lock(&cpufreq_governor_mutex);
2186 if (!find_governor(governor->name)) {
2188 list_add(&governor->governor_list, &cpufreq_governor_list);
2191 mutex_unlock(&cpufreq_governor_mutex);
2194 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
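/*
 * Example (illustrative sketch): the skeleton of a governor registered through
 * cpufreq_register_governor(). Only ->limits() is shown (mirroring what the
 * performance governor does); a real governor would usually also provide
 * ->init/->exit/->start/->stop.
 */
static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* Re-apply limits by pinning to the current policy maximum. */
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.limits	= example_gov_limits,
};

/* Typically done from module init: cpufreq_register_governor(&example_governor); */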
2196 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2198 struct cpufreq_policy *policy;
2199 unsigned long flags;
2204 if (cpufreq_disabled())
2207 /* clear last_governor for all inactive policies */
2208 read_lock_irqsave(&cpufreq_driver_lock, flags);
2209 for_each_inactive_policy(policy) {
2210 if (!strcmp(policy->last_governor, governor->name)) {
2211 policy->governor = NULL;
2212 strcpy(policy->last_governor, "\0");
2215 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2217 mutex_lock(&cpufreq_governor_mutex);
2218 list_del(&governor->governor_list);
2219 mutex_unlock(&cpufreq_governor_mutex);
2221 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2224 /*********************************************************************
2225 * POLICY INTERFACE *
2226 *********************************************************************/
2229 * cpufreq_get_policy - get the current cpufreq_policy
2230 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2233 * Reads the current cpufreq policy.
2235 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2237 struct cpufreq_policy *cpu_policy;
2241 cpu_policy = cpufreq_cpu_get(cpu);
2245 memcpy(policy, cpu_policy, sizeof(*policy));
2247 cpufreq_cpu_put(cpu_policy);
2250 EXPORT_SYMBOL(cpufreq_get_policy);
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_policy: New policy data.
 *
 * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
 * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
 * the driver's ->verify() callback again and run the notifiers for it again
 * with the CPUFREQ_NOTIFY value.  Next, copy the min and max parameters
 * of @new_policy to @policy and either invoke the driver's ->setpolicy()
 * callback (if present) or carry out a governor update for @policy.  That is,
 * run the current governor's ->limits() callback (if the governor field in
 * @new_policy points to the same object as the one in @policy) or replace the
 * governor for @policy with the new one stored in @new_policy.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
int cpufreq_set_policy(struct cpufreq_policy *policy,
		       struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}

/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits saved in the
 * user_policy sub-structure of that policy, which triggers the evaluation
 * of policy notifiers and the cpufreq driver's ->verify() callback for the
 * policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	cpufreq_set_policy(policy, &new_policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);

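/*
 * Illustrative sketch, not part of this file: platform code re-evaluating a
 * CPU's policy after firmware changes the allowed limits, in the style of the
 * ACPI _PPC handling.  sample_platform_limit_changed() is a hypothetical
 * hook; the real work is simply calling cpufreq_update_policy().
 */
#if 0
static void sample_platform_limit_changed(unsigned int cpu)
{
	/*
	 * Re-runs the policy notifiers and the driver's ->verify() callback,
	 * then re-applies the saved user min/max for this CPU's policy.
	 */
	cpufreq_update_policy(cpu);
}
#endif
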
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

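/*
 * Illustrative sketch, not part of this file: a driver opting in to the
 * software boost handling from its per-policy ->init() callback when it finds
 * boost frequencies, in the style of cpufreq-dt.  sample_policy_init() and
 * have_boost_freqs are made-up names for illustration.
 */
#if 0
static int sample_policy_init(struct cpufreq_policy *policy, bool have_boost_freqs)
{
	int ret;

	if (have_boost_freqs) {
		/* Installs cpufreq_boost_set_sw() and the global "boost" file. */
		ret = cpufreq_enable_boost_support();
		if (ret)
			return ret;
	}

	return 0;
}
#endif
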
/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);
	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);
	return 0;
}

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

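/*
 * Illustrative sketch, not part of this file: skeleton of a table-based
 * driver built around cpufreq_register_driver()/cpufreq_unregister_driver().
 * The sample_* identifiers, the frequency table and the transition latency
 * are all made up; cpufreq_generic_frequency_table_verify() and
 * cpufreq_generic_attr are the generic helpers used by many in-tree drivers.
 * Kept under "#if 0" so it is never built as part of the core.
 */
#if 0
static struct cpufreq_frequency_table sample_freq_table[] = {
	{ .frequency = 500000 },	/* kHz, made up */
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static unsigned int sample_cur_index;

static int sample_target_index(struct cpufreq_policy *policy,
			       unsigned int index)
{
	/* Program the hardware for sample_freq_table[index] here. */
	sample_cur_index = index;
	return 0;
}

static unsigned int sample_get(unsigned int cpu)
{
	return sample_freq_table[sample_cur_index].frequency;
}

static int sample_cpu_init(struct cpufreq_policy *policy)
{
	/* The core validates and sorts the table set here. */
	policy->freq_table = sample_freq_table;
	policy->cpuinfo.transition_latency = 300 * 1000;	/* ns, made up */
	return 0;
}

static struct cpufreq_driver sample_driver = {
	.name		= "sample",
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= sample_target_index,
	.get		= sample_get,
	.init		= sample_cpu_init,
	.attr		= cpufreq_generic_attr,
};

static int __init sample_driver_init(void)
{
	/* Returns -EEXIST if another cpufreq driver is already registered. */
	return cpufreq_register_driver(&sample_driver);
}

static void __exit sample_driver_exit(void)
{
	cpufreq_unregister_driver(&sample_driver);
}

module_init(sample_driver_init);
module_exit(sample_driver_exit);
MODULE_LICENSE("GPL");
#endif
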
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);