/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>

#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16
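
/*
 * Uncore counters are exposed as PERF_CTL/PERF_CTR MSR pairs and are
 * also readable via RDPMC. Per the AMD manuals, RDPMC indices 0-5 map
 * to the core counters, 6-9 to the NB counters and 10 upwards to the
 * L2I/LLC counters, which is where the RDPMC_BASE_* values come from.
 */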

static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}
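
/*
 * The uncore counters are 48 bits wide. Shifting both the previous and
 * current raw values left by COUNTER_SHIFT lines them up at the top of
 * a 64-bit word, so 48-bit rollover behaves like 64-bit rollover and
 * the subtraction produces the correct delta, recovered by shifting
 * back down.
 */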

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
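
/*
 * Counter slots are claimed with cmpxchg() rather than a lock: the
 * uncore is shared by every core in the NB/LLC domain, and whichever
 * CPU first swaps a NULL slot to its event owns that counter.
 */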

static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core, however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
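
/*
 * Example of driving these PMUs from userspace (the event encoding here
 * is a placeholder; real event/umask values are family-specific, see
 * the AMD BKDG or PPR for the processor):
 *
 *   # count a raw NB event system-wide for one second
 *   perf stat -e amd_nb/event=0xe0,umask=0x0/ -a sleep 1
 */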

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);
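
/*
 * The cpumask attribute is exposed through sysfs, e.g. as
 * /sys/bus/event_source/devices/amd_nb/cpumask, and tells userspace
 * tools which single CPU per NB/LLC domain events should be opened on.
 */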

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");
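
/*
 * These format entries let perf parse strings such as
 * "amd_nb/event=0x...,umask=0x.../": the NB event select is 12 bits,
 * split across config bits 0-7 and 32-35 to mirror the PERF_CTL MSR
 * layout.
 */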

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};
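
/*
 * Both PMUs use perf_invalid_context: per-task contexts are refused by
 * the core, so events are always system-wide, consistent with the
 * PERF_ATTACH_TASK check in amd_uncore_event_init().
 */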

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}

static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
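
/*
 * amd_uncore_cpu_up_prepare() optimistically allocates an object per
 * incoming CPU; if amd_uncore_cpu_starting() finds an online sibling
 * with the same id, the per-cpu pointer is redirected to the sibling's
 * refcounted object and the fresh allocation is parked on
 * uncore_unused_list to be freed by uncore_clean_online().
 */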

static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
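
/*
 * CPUID leaf 0x8000001e reports the node ID in ECX[7:0]; leaf
 * 0x8000001d subleaf 2 describes the L2 cache, with EAX[25:14] giving
 * the number of cores sharing it minus one. Both leaves depend on the
 * topology extensions (TOPOEXT) feature checked in amd_uncore_init().
 */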

static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared;

		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
		nshared = ((eax >> 14) & 0xfff) + 1;
		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}
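
/*
 * When the designated CPU of a domain goes offline, active events are
 * handed to a surviving CPU of the same domain via
 * perf_pmu_migrate_context(), so counting continues across hotplug.
 */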

static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}

static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	switch (boot_cpu_data.x86) {
	case 23:
		/* Family 17h: */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L3;
		break;
	case 22:
		/* Family 16h - may change: */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		break;
	default:
		/*
		 * All prior families have the same number of
		 * NorthBridge and Last Level Cache counters
		 */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		break;
	}
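
	/*
	 * The CPUID leaves used by amd_uncore_cpu_starting() to identify
	 * NB and LLC domains (0x8000001e and 0x8000001d) are only
	 * defined when topology extensions are present.
	 */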
	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("perf: AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);