/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)
#define HYGON_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
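/*
 * Per-policy driver state. The read/write callbacks are selected once at
 * init time (MSR- or IO-port-based, depending on what the _PCT control
 * register describes), so the hot paths below never re-check the register
 * space.
 */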
struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
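/*
 * Read the current boost state of @cpu from the vendor-specific MSR.
 * Both the Intel and the AMD/Hygon bits are "disable" flags, which is
 * why the result is inverted.
 */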
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}
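/*
 * Set or clear the boost-disable bit on the CPU executing this function.
 * Callers route it to the right CPU(s) via on_each_cpu() or the CPU
 * hotplug callbacks further below.
 */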
static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}
static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

static int set_boost(int val)
{
	get_online_cpus();
	on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	set_boost(val);

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}
cpufreq_freq_attr_rw(cpb);
#endif
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
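/*
 * The extract_*() helpers translate a raw status value (IO port or MSR
 * contents) back into a frequency from the policy's frequency table.
 */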
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		msr &= HYGON_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}
static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}
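/*
 * drv_cmd bundles a register description with the read or write callback
 * so that it can be shipped to other CPUs via the smp_call_function_*()
 * helpers below.
 */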
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("%s = %u\n", __func__, val);

	return val;
}
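/*
 * ->get() callback, installed only for the FIXED_HARDWARE (MSR) case.
 * Comparing the value read from hardware against the cached table state
 * catches BIOS/SMM frequency changes made behind the driver's back.
 */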
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("%s (%d)\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
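/*
 * ->target_index() callback: program the P-state control value on all
 * CPUs that need it and, if acpi_pstate_strict is set, verify that the
 * transition actually took effect.
 */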
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("%s (%d)\n", __func__, policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}
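/*
 * ->fast_switch() callback: writes the control register directly on the
 * local CPU with no cross-CPU calls, which is why ->init() only allows
 * fast switching on policies that need no coordination (or use SW_ANY).
 */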
static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}
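/*
 * Pick the table state whose frequency is closest to the measured
 * cpu_khz: the "(2 * cpu_khz) > (freqn + freq)" test checks which side
 * of the midpoint between two adjacent states the measured value falls
 * on. Used only for the IO-port case, where the current speed cannot be
 * read back from hardware.
 */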
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
static int cpufreq_boost_online(unsigned int cpu)
{
	/*
	 * On the CPU_UP path we simply keep the boost-disable flag
	 * in sync with the current global state.
	 */
	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("%s\n", __func__);

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_stepping == 8)) {
			pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *freq_table;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("%s\n", __func__);

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
			     GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = freq_table;
	perf->state = 0;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	return result;

err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}
static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
							      policy->cpu);

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("%s\n", __func__);

	data->resume = 1;

	return 0;
}
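/* Per-policy sysfs attributes; "cpb" is pruned at module init on non-AMD. */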
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.ready		= acpi_cpufreq_cpu_ready,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};
static enum cpuhp_state acpi_cpufreq_online;

static void __init acpi_cpufreq_boost_init(void)
{
	int ret;

	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
		pr_debug("Boost capabilities not present in the processor\n");
		return;
	}

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	/*
	 * This calls the online callback on all online cpu and forces all
	 * MSRs to the same value.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0) {
		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
		return;
	}
	acpi_cpufreq_online = ret;
}
static void acpi_cpufreq_boost_exit(void)
{
	if (acpi_cpufreq_online > 0)
		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("%s\n", __func__);

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}
static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("%s\n", __func__);

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");