/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16
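
/*
 * RDPMC_BASE_* are the first RDPMC indices of the NB and last-level-cache
 * counter banks; amd_uncore_add() adds the counter index to them to form
 * event_base_rdpmc. COUNTER_SHIFT reflects the 48-bit width of the
 * counters (64 - 48 = 16); see amd_uncore_read().
 */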

static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);

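/*
 * A single amd_uncore instance is shared by all cores that share the same
 * northbridge or last-level cache: "cpu" is the core currently driving the
 * counters, "refcnt" the number of online cores attached to the instance.
 */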
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
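	/*
	 * The counters are 48 bits wide, so shift both raw values up by
	 * COUNTER_SHIFT before subtracting; the difference then sign-extends
	 * correctly across a counter wrap once shifted back down.
	 */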
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

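	/* the control and count MSRs are laid out as adjacent CTL/CTR pairs */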
	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

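/*
 * Uncore events can only be opened system-wide, e.g. (illustrative event
 * encoding only, not taken from this file) "perf stat -a -e amd_nb/event=0xe0/";
 * sampling and per-task use are rejected below.
 */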
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core; however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since requests can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				     \
static ssize_t								     \
_dev##_show##_name(struct device *dev,					     \
		struct device_attribute *attr,				     \
		char *page)						     \
{									     \
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			     \
	return sprintf(page, _format "\n");				     \
}									     \
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						     \
static struct attribute *amd_uncore_format_attr_##_name[] = {		     \
	&format_attr_event_##_name.attr,				     \
	&format_attr_umask.attr,					     \
	NULL,								     \
};									     \
static struct attribute_group amd_uncore_format_group_##_name = {	     \
	.name = "format",						     \
	.attrs = amd_uncore_format_attr_##_name,			     \
};									     \
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = {   \
	&amd_uncore_attr_group,						     \
	&amd_uncore_format_group_##_name,				     \
	NULL,								     \
};

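/*
 * AMD_FORMAT_ATTR()/AMD_ATTRIBUTE() expand into the usual sysfs format
 * attributes, e.g. /sys/bus/event_source/devices/amd_df/format/event, which
 * the perf tool parses when encoding "amd_df/event=.../" specifications.
 */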
AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

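/*
 * CPU hotplug lifecycle, as wired up in amd_uncore_init():
 *   prepare      - allocate this CPU's amd_uncore structures
 *   starting     - derive the uncore ids and merge with an online sibling
 *   online       - free parked duplicates and claim the active cpumask slot
 *   down_prepare - migrate events to a surviving sibling
 *   dead         - drop the refcount, freeing on last user
 */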
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}

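/*
 * If another online CPU already hosts an uncore with the same id, attach
 * "this" to it and park the freshly allocated duplicate on
 * uncore_unused_list; callers run from the atomic "starting" hotplug step,
 * so freeing is deferred to uncore_online().
 */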
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

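	/* the NB id is the node id: CPUID leaf 0x8000001e, ECX[7:0] (NodeId) */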
	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

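	/*
	 * The LLC id groups CPUs by cache instance: CPUID leaf 0x8000001d,
	 * index 2, EAX[25:14] (plus one) is the number of threads sharing
	 * the cache, and the apicid rounded down to that count names it.
	 */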
	if (amd_uncore_llc) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	switch (boot_cpu_data.x86) {
	case 23:
		/* Family 17h: */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L3;
		/*
		 * On Family 17h the NorthBridge counters are repurposed as
		 * Data Fabric counters, and L3 counters are supported as
		 * well. The PMUs are therefore exported, depending on
		 * family, as either L2 or L3 and NB or DF.
		 */
		amd_nb_pmu.name = "amd_df";
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		break;
	case 22:
		/* Family 16h - may change: */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		amd_nb_pmu.name = "amd_nb";
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
		break;
	default:
		/*
		 * All prior families have the same number of
		 * NorthBridge and Last Level Cache counters
		 */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		amd_nb_pmu.name = "amd_nb";
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
		break;
	}
	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("perf: AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);