// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Uncore Frequency Setting
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online,
 * one control CPU is identified per die to read/write the limits. If that CPU
 * goes offline, another online CPU in the die takes over; when the last CPU in
 * a die goes offline, the sysfs object for that die is removed.
 * The majority of the code deals with creating the sysfs attributes and
 * handling their read/write callbacks.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
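
/*
 * Illustrative userspace sketch (not part of the driver, compiled out via
 * "#if 0"): shows how the sysfs attributes created below could be used. The
 * die directory name "package_00_die_00" is just an example instance; real
 * systems expose one such directory per die under
 * /sys/devices/system/cpu/intel_uncore_frequency/.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DIE_DIR "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00"

int main(void)
{
        char buf[32];
        ssize_t len;
        int fd;

        /* Read the current maximum uncore frequency limit (kHz) */
        fd = open(DIE_DIR "/max_freq_khz", O_RDONLY);
        if (fd < 0) {
                perror("open max_freq_khz");
                return 1;
        }
        len = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (len <= 0)
                return 1;
        buf[len] = '\0';
        printf("max_freq_khz: %s", buf);

        /* Lower the limit to 2.4 GHz; the driver converts kHz to a ratio */
        fd = open(DIE_DIR "/max_freq_khz", O_WRONLY);
        if (fd < 0) {
                perror("open max_freq_khz for write (root required)");
                return 1;
        }
        if (write(fd, "2400000\n", strlen("2400000\n")) < 0)
                perror("write");
        close(fd);

        return 0;
}
#endif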

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define MSR_UNCORE_RATIO_LIMIT                  0x620
#define UNCORE_FREQ_KHZ_MULTIPLIER              100000

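/*
 * Layout of MSR_UNCORE_RATIO_LIMIT as used below: bits 6:0 hold the maximum
 * uncore ratio limit and bits 14:8 the minimum ratio limit. One ratio unit
 * corresponds to 100 MHz, i.e. UNCORE_FREQ_KHZ_MULTIPLIER kHz.
 */
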
/**
 * struct uncore_data - Encapsulate all uncore data
 * @kobj:               Kobject used to create the per die/package sysfs directory
 * @stored_uncore_data: Last user changed MSR 0x620 value, which will be restored
 *                      on system resume.
 * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init
 * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init
 * @control_cpu:        Designated CPU for a die to read/write
 * @valid:              Mark the data valid/invalid
 *
 * This structure is used to encapsulate all data related to uncore sysfs
 * settings for a die/package.
 */
struct uncore_data {
        struct kobject kobj;
        u64 stored_uncore_data;
        u32 initial_min_freq_khz;
        u32 initial_max_freq_khz;
        int control_cpu;
        bool valid;
};

#define to_uncore_data(a) container_of(a, struct uncore_data, kobj)

/* Max instances for uncore data, one for each die */
static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Root of all the uncore sysfs kobjs */
static struct kobject uncore_root_kobj;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU online callback register instance */
static enum cpuhp_state uncore_hp_state __read_mostly;
/* Mutex to control all mutual exclusions */
static DEFINE_MUTEX(uncore_lock);

struct uncore_attr {
        struct attribute attr;
        ssize_t (*show)(struct kobject *kobj,
                        struct attribute *attr, char *buf);
        ssize_t (*store)(struct kobject *kobj,
                         struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_uncore_ro(_name) \
static struct uncore_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_uncore_rw(_name) \
static struct uncore_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

#define show_uncore_data(member_name)                                   \
        static ssize_t show_##member_name(struct kobject *kobj,         \
                                          struct attribute *attr,       \
                                          char *buf)                    \
        {                                                               \
                struct uncore_data *data = to_uncore_data(kobj);        \
                return scnprintf(buf, PAGE_SIZE, "%u\n",                \
                                 data->member_name);                    \
        }                                                               \
        define_one_uncore_ro(member_name)

show_uncore_data(initial_min_freq_khz);
show_uncore_data(initial_max_freq_khz);

/* Common function to read MSR 0x620 and return the min/max limits in kHz */
static int uncore_read_ratio(struct uncore_data *data, unsigned int *min,
                             unsigned int *max)
{
        u64 cap;
        int ret;

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                return ret;

        *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
        *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;

        return 0;
}
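
/*
 * Worked example (illustrative value): cap = 0x0818 decodes to a max ratio of
 * 0x18 (24) -> 2400000 kHz and a min ratio of 0x08 (8) -> 800000 kHz, since
 * each ratio step is worth UNCORE_FREQ_KHZ_MULTIPLIER (100000) kHz.
 */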

/* Common function to set min/max ratios to be used by sysfs callbacks */
static int uncore_write_ratio(struct uncore_data *data, unsigned int input,
                              int set_max)
{
        int ret;
        u64 cap;

        mutex_lock(&uncore_lock);

        input /= UNCORE_FREQ_KHZ_MULTIPLIER;
        if (!input || input > 0x7F) {
                ret = -EINVAL;
                goto finish_write;
        }

        ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
        if (ret)
                goto finish_write;

        if (set_max) {
                cap &= ~0x7F;
                cap |= input;
        } else {
                cap &= ~GENMASK(14, 8);
                cap |= (input << 8);
        }

        ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
        if (ret)
                goto finish_write;

        data->stored_uncore_data = cap;

finish_write:
        mutex_unlock(&uncore_lock);

        return ret;
}

static ssize_t store_min_max_freq_khz(struct kobject *kobj,
                                      struct attribute *attr,
                                      const char *buf, ssize_t count,
                                      int min_max)
{
        struct uncore_data *data = to_uncore_data(kobj);
        unsigned int input;
        int ret;

        if (kstrtouint(buf, 10, &input))
                return -EINVAL;

        ret = uncore_write_ratio(data, input, min_max);
        if (ret)
                return ret;

        return count;
}

static ssize_t show_min_max_freq_khz(struct kobject *kobj,
                                     struct attribute *attr,
                                     char *buf, int min_max)
{
        struct uncore_data *data = to_uncore_data(kobj);
        unsigned int min, max;
        int ret;

        mutex_lock(&uncore_lock);
        ret = uncore_read_ratio(data, &min, &max);
        mutex_unlock(&uncore_lock);
        if (ret)
                return ret;

        if (min_max)
                return sprintf(buf, "%u\n", max);

        return sprintf(buf, "%u\n", min);
}

#define store_uncore_min_max(name, min_max)                             \
        static ssize_t store_##name(struct kobject *kobj,               \
                                    struct attribute *attr,             \
                                    const char *buf, ssize_t count)     \
        {                                                               \
                                                                        \
                return store_min_max_freq_khz(kobj, attr, buf, count,   \
                                              min_max);                 \
        }

#define show_uncore_min_max(name, min_max)                              \
        static ssize_t show_##name(struct kobject *kobj,                \
                                   struct attribute *attr, char *buf)   \
        {                                                               \
                                                                        \
                return show_min_max_freq_khz(kobj, attr, buf, min_max); \
        }

store_uncore_min_max(min_freq_khz, 0);
store_uncore_min_max(max_freq_khz, 1);

show_uncore_min_max(min_freq_khz, 0);
show_uncore_min_max(max_freq_khz, 1);

define_one_uncore_rw(min_freq_khz);
define_one_uncore_rw(max_freq_khz);

static struct attribute *uncore_attrs[] = {
        &initial_min_freq_khz.attr,
        &initial_max_freq_khz.attr,
        &max_freq_khz.attr,
        &min_freq_khz.attr,
        NULL
};

static struct kobj_type uncore_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = uncore_attrs,
};

static struct kobj_type uncore_root_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
};

/* Caller provides protection */
static struct uncore_data *uncore_get_instance(unsigned int cpu)
{
        int id = topology_logical_die_id(cpu);

        if (id >= 0 && id < uncore_max_entries)
                return &uncore_instances[id];

        return NULL;
}

static void uncore_add_die_entry(int cpu)
{
        struct uncore_data *data;

        mutex_lock(&uncore_lock);
        data = uncore_get_instance(cpu);
        if (!data) {
                mutex_unlock(&uncore_lock);
                return;
        }

        if (data->valid) {
                /* control cpu changed */
                data->control_cpu = cpu;
        } else {
                char str[64];
                int ret;

                memset(data, 0, sizeof(*data));
                sprintf(str, "package_%02d_die_%02d",
                        topology_physical_package_id(cpu),
                        topology_die_id(cpu));

                uncore_read_ratio(data, &data->initial_min_freq_khz,
                                  &data->initial_max_freq_khz);

                ret = kobject_init_and_add(&data->kobj, &uncore_ktype,
                                           &uncore_root_kobj, str);
                if (!ret) {
                        data->control_cpu = cpu;
                        data->valid = true;
                }
        }
        mutex_unlock(&uncore_lock);
}
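
/*
 * Resulting sysfs layout for each die (directory name is an example
 * instance), rooted at /sys/devices/system/cpu/intel_uncore_frequency/:
 *
 *   package_00_die_00/
 *       initial_min_freq_khz   (read only)
 *       initial_max_freq_khz   (read only)
 *       min_freq_khz           (read/write)
 *       max_freq_khz           (read/write)
 */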

/* Last CPU in this die is offline, so remove sysfs entries */
static void uncore_remove_die_entry(int cpu)
{
        struct uncore_data *data;

        mutex_lock(&uncore_lock);
        data = uncore_get_instance(cpu);
        if (data) {
                kobject_put(&data->kobj);
                data->control_cpu = -1;
                data->valid = false;
        }
        mutex_unlock(&uncore_lock);
}

static int uncore_event_cpu_online(unsigned int cpu)
{
        int target;

        /* Check if there is already a designated control CPU in this die */
        target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
        if (target < nr_cpu_ids)
                return 0;

        /* Use this CPU on this die as a control CPU */
        cpumask_set_cpu(cpu, &uncore_cpu_mask);
        uncore_add_die_entry(cpu);

        return 0;
}

static int uncore_event_cpu_offline(unsigned int cpu)
{
        int target;

        /* Check if the CPU going offline is the control CPU for uncore MSRs */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
                return 0;

        /* Find another CPU in this die to use as the control CPU */
        target = cpumask_any_but(topology_die_cpumask(cpu), cpu);

        if (target < nr_cpu_ids) {
                cpumask_set_cpu(target, &uncore_cpu_mask);
                uncore_add_die_entry(target);
        } else {
                uncore_remove_die_entry(cpu);
        }

        return 0;
}

static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
                            void *_unused)
{
        int cpu;

        switch (mode) {
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                for_each_cpu(cpu, &uncore_cpu_mask) {
                        struct uncore_data *data;
                        int ret;

                        data = uncore_get_instance(cpu);
                        if (!data || !data->valid || !data->stored_uncore_data)
                                continue;

                        ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT,
                                            data->stored_uncore_data);
                        if (ret)
                                return ret;
                }
                break;
        default:
                break;
        }
        return 0;
}

static struct notifier_block uncore_pm_nb = {
        .notifier_call = uncore_pm_notify,
};

#define ICPU(model)     { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
        ICPU(INTEL_FAM6_BROADWELL_G),
        ICPU(INTEL_FAM6_BROADWELL_X),
        ICPU(INTEL_FAM6_BROADWELL_D),
        ICPU(INTEL_FAM6_SKYLAKE_X),
        ICPU(INTEL_FAM6_ICELAKE_X),
        ICPU(INTEL_FAM6_ICELAKE_D),
        {}
};

static int __init intel_uncore_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        id = x86_match_cpu(intel_uncore_cpu_ids);
        if (!id)
                return -ENODEV;

        uncore_max_entries = topology_max_packages() *
                                        topology_max_die_per_package();
        uncore_instances = kcalloc(uncore_max_entries,
                                   sizeof(*uncore_instances), GFP_KERNEL);
        if (!uncore_instances)
                return -ENOMEM;

        ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype,
                                   &cpu_subsys.dev_root->kobj,
                                   "intel_uncore_frequency");
        if (ret)
                goto err_free;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/uncore-freq:online",
                                uncore_event_cpu_online,
                                uncore_event_cpu_offline);
        if (ret < 0)
                goto err_rem_kobj;

        uncore_hp_state = ret;

        ret = register_pm_notifier(&uncore_pm_nb);
        if (ret)
                goto err_rem_state;

        return 0;

err_rem_state:
        cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
        kobject_put(&uncore_root_kobj);
err_free:
        kfree(uncore_instances);

        return ret;
}
module_init(intel_uncore_init)

static void __exit intel_uncore_exit(void)
{
        int i;

        unregister_pm_notifier(&uncore_pm_nb);
        cpuhp_remove_state(uncore_hp_state);
        for (i = 0; i < uncore_max_entries; ++i) {
                if (uncore_instances[i].valid)
                        kobject_put(&uncore_instances[i].kobj);
        }
        kobject_put(&uncore_root_kobj);
        kfree(uncore_instances);
}
module_exit(intel_uncore_exit)

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");