1 #include <linux/perf_event.h>
2 #include <linux/export.h>
3 #include <linux/types.h>
4 #include <linux/init.h>
5 #include <linux/slab.h>
6 #include <linux/delay.h>
7 #include <asm/apicdef.h>
10 #include "../perf_event.h"
12 static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
14 static __initconst const u64 amd_hw_cache_event_ids
15 [PERF_COUNT_HW_CACHE_MAX]
16 [PERF_COUNT_HW_CACHE_OP_MAX]
17 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
21 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
22 [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
25 [ C(RESULT_ACCESS) ] = 0,
26 [ C(RESULT_MISS) ] = 0,
28 [ C(OP_PREFETCH) ] = {
29 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
30 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
35 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
36 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
39 [ C(RESULT_ACCESS) ] = -1,
40 [ C(RESULT_MISS) ] = -1,
42 [ C(OP_PREFETCH) ] = {
43 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
44 [ C(RESULT_MISS) ] = 0,
49 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
50 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
53 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
54 [ C(RESULT_MISS) ] = 0,
56 [ C(OP_PREFETCH) ] = {
57 [ C(RESULT_ACCESS) ] = 0,
58 [ C(RESULT_MISS) ] = 0,
63 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
64 [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
67 [ C(RESULT_ACCESS) ] = 0,
68 [ C(RESULT_MISS) ] = 0,
70 [ C(OP_PREFETCH) ] = {
71 [ C(RESULT_ACCESS) ] = 0,
72 [ C(RESULT_MISS) ] = 0,
77 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
78 [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
81 [ C(RESULT_ACCESS) ] = -1,
82 [ C(RESULT_MISS) ] = -1,
84 [ C(OP_PREFETCH) ] = {
85 [ C(RESULT_ACCESS) ] = -1,
86 [ C(RESULT_MISS) ] = -1,
91 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
92 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
95 [ C(RESULT_ACCESS) ] = -1,
96 [ C(RESULT_MISS) ] = -1,
98 [ C(OP_PREFETCH) ] = {
99 [ C(RESULT_ACCESS) ] = -1,
100 [ C(RESULT_MISS) ] = -1,
105 [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
106 [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
109 [ C(RESULT_ACCESS) ] = -1,
110 [ C(RESULT_MISS) ] = -1,
112 [ C(OP_PREFETCH) ] = {
113 [ C(RESULT_ACCESS) ] = -1,
114 [ C(RESULT_MISS) ] = -1,
120 * AMD Performance Monitor K7 and later, up to and including Family 16h:
122 static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
124 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
125 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
126 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
127 [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
128 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
129 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
130 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
131 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
135 * AMD Performance Monitor Family 17h and later:
137 static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
139 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
140 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
141 [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
142 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
143 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
144 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
145 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
148 static u64 amd_pmu_event_map(int hw_event)
150 if (boot_cpu_data.x86 >= 0x17)
151 return amd_f17h_perfmon_event_map[hw_event];
153 return amd_perfmon_event_map[hw_event];
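/*
 * Decoding the entries above (a worked example; the usual AMD layout of
 * unit mask in bits 15:8 and event select in bits 7:0 is assumed):
 * PERF_COUNT_HW_CACHE_REFERENCES == 0x077d on pre-17h parts means
 * umask 0x07 with event 0x7d, while 0xff60 on family 17h and later
 * means umask 0xff with event 0x60.
 */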
157 * Previously calculated offsets
159 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
160 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
164 * 4 counters starting at 0xc0010000 each offset by 1
166 * CPUs with core performance counter extensions:
167 * 6 counters starting at 0xc0010200 each offset by 2
169 static inline int amd_pmu_addr_offset(int index, bool eventsel)
177 offset = event_offsets[index];
179 offset = count_offsets[index];
184 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
190 event_offsets[index] = offset;
192 count_offsets[index] = offset;
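/*
 * Worked example for the offsets above (assuming MSR_K7_EVNTSEL0 ==
 * 0xc0010000, MSR_K7_PERFCTR0 == 0xc0010004, MSR_F15H_PERF_CTL ==
 * 0xc0010200 and MSR_F15H_PERF_CTR == 0xc0010201), for counter index 2:
 *
 *	legacy:    eventsel = 0xc0010000 + 2   = 0xc0010002
 *	           perfctr  = 0xc0010004 + 2   = 0xc0010006
 *	core ext:  offset   = 2 << 1           = 4
 *	           eventsel = 0xc0010200 + 4   = 0xc0010204
 *	           perfctr  = 0xc0010201 + 4   = 0xc0010205
 *
 * i.e. the core extension MSRs come in adjacent CTL/CTR pairs, two
 * apart per counter.
 */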
197 static int amd_core_hw_config(struct perf_event *event)
199 if (event->attr.exclude_host && event->attr.exclude_guest)
201 * When HO == GO == 1 the hardware treats that as GO == HO == 0
202 * and will count in both modes. We don't want to count in that
203 * case so we emulate no-counting by setting USR = OS = 0.
205 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
206 ARCH_PERFMON_EVENTSEL_OS);
207 else if (event->attr.exclude_host)
208 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
209 else if (event->attr.exclude_guest)
210 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
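/*
 * Mapping example for the branches above: exclude_host == 1 means
 * "count only while a guest is running", hence the Guest-only bit;
 * exclude_guest == 1 means "count only in the host", hence the
 * Host-only bit. Both set would turn into HO == GO == 1, which the
 * hardware counts everywhere, so that case is neutered by clearing
 * USR and OS instead.
 */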
216 * AMD64 events are detected based on their event codes.
218 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
220 return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
223 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
225 return (hwc->config & 0xe0) == 0xe0;
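/*
 * Worked example: the raw value 0xb8e9 from the cache map above encodes
 * unit mask 0xb8 and event select 0xe9, so amd_get_event_code() returns
 * 0x0e9 and amd_is_nb_event() sees (0xe9 & 0xe0) == 0xe0, i.e. a
 * NorthBridge event. An extended code such as 0x1d6 sets bit 32 of the
 * config, which amd_get_event_code() folds into bits 11:8 of the code:
 * ((0x100000000ULL >> 24) & 0x0f00) | 0xd6 == 0x1d6.
 */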
228 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
230 struct amd_nb *nb = cpuc->amd_nb;
232 return nb && nb->nb_id != -1;
235 static int amd_pmu_hw_config(struct perf_event *event)
239 /* pass precise event sampling to ibs: */
240 if (event->attr.precise_ip && get_ibs_caps())
243 if (has_branch_stack(event))
246 ret = x86_pmu_hw_config(event);
250 if (event->attr.type == PERF_TYPE_RAW)
251 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
253 return amd_core_hw_config(event);
256 static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
257 struct perf_event *event)
259 struct amd_nb *nb = cpuc->amd_nb;
263 * need to scan whole list because event may not have
264 * been assigned during scheduling
266 * no race condition possible because event can only
267 * be removed on one CPU at a time AND PMU is disabled
270 for (i = 0; i < x86_pmu.num_counters; i++) {
271 if (cmpxchg(nb->owners + i, event, NULL) == event)
277 * AMD64 NorthBridge events need special treatment because
278 * counter access needs to be synchronized across all cores
279 * of a package. Refer to BKDG section 3.12
281 * NB events are events measuring L3 cache, Hypertransport
282 * traffic. They are identified by an event code >= 0xe00.
283 * They measure events on the NorthBridge which is shared
284 * by all cores on a package. NB events are counted on a
285 * shared set of counters. When a NB event is programmed
286 * in a counter, the data actually comes from a shared
287 * counter. Thus, access to those counters needs to be
290 * We implement the synchronization such that no two cores
291 * can be measuring NB events using the same counters. Thus,
292 * we maintain a per-NB allocation table. The available slot
293 * is propagated using the event_constraint structure.
295 * We provide only one choice for each NB event based on
296 * the fact that only NB events have restrictions. Consequently,
297 * if a counter is available, there is a guarantee the NB event
298 * will be assigned to it. If no slot is available, an empty
299 * constraint is returned and scheduling will eventually fail
302 * Note that all cores attached to the same NB compete for the same
303 * counters to host NB events, this is why we use atomic ops. Some
304 * multi-chip CPUs may have more than one NB.
306 * Given that resources are allocated (cmpxchg), they must be
307 * eventually freed for others to use. This is accomplished by
308 * calling __amd_put_nb_event_constraints()
310 * Non NB events are not impacted by this restriction.
312 static struct event_constraint *
313 __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
314 struct event_constraint *c)
316 struct hw_perf_event *hwc = &event->hw;
317 struct amd_nb *nb = cpuc->amd_nb;
318 struct perf_event *old;
328 * detect if already present, if so reuse
330 * cannot merge with actual allocation
331 * because of possible holes
333 * event can already be present yet not assigned (in hwc->idx)
334 * because of successive calls to x86_schedule_events() from
335 * hw_perf_group_sched_in() without hw_perf_enable()
337 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
338 if (new == -1 || hwc->idx == idx)
339 /* assign free slot, prefer hwc->idx */
340 old = cmpxchg(nb->owners + idx, NULL, event);
341 else if (nb->owners[idx] == event)
342 /* event already present */
347 if (old && old != event)
350 /* reassign to this slot */
352 cmpxchg(nb->owners + new, event, NULL);
355 /* already present, reuse */
361 return &emptyconstraint;
363 return &nb->event_constraints[new];
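/*
 * Hypothetical scheduling sequence illustrating the owners[] protocol
 * above (slot numbers chosen arbitrarily): core 0 schedules NB event A
 * and claims slot 0 with cmpxchg(&nb->owners[0], NULL, A); core 1 then
 * schedules NB event B, sees owners[0] already taken and claims slot 1
 * instead. When A is removed, __amd_put_nb_event_constraints() releases
 * the slot with cmpxchg(&nb->owners[0], A, NULL), making it available
 * to the other cores again.
 */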
366 static struct amd_nb *amd_alloc_nb(int cpu)
371 nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
378 * initialize all possible NB constraints
380 for (i = 0; i < x86_pmu.num_counters; i++) {
381 __set_bit(i, nb->event_constraints[i].idxmsk);
382 nb->event_constraints[i].weight = 1;
387 static int amd_pmu_cpu_prepare(int cpu)
389 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
391 WARN_ON_ONCE(cpuc->amd_nb);
393 if (!x86_pmu.amd_nb_constraints)
396 cpuc->amd_nb = amd_alloc_nb(cpu);
403 static void amd_pmu_cpu_starting(int cpu)
405 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
406 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
410 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
412 if (!x86_pmu.amd_nb_constraints)
415 nb_id = amd_get_nb_id(cpu);
416 WARN_ON_ONCE(nb_id == BAD_APICID);
418 for_each_online_cpu(i) {
419 nb = per_cpu(cpu_hw_events, i).amd_nb;
420 if (WARN_ON_ONCE(!nb))
423 if (nb->nb_id == nb_id) {
424 *onln = cpuc->amd_nb;
430 cpuc->amd_nb->nb_id = nb_id;
431 cpuc->amd_nb->refcnt++;
434 static void amd_pmu_cpu_dead(int cpu)
436 struct cpu_hw_events *cpuhw;
438 if (!x86_pmu.amd_nb_constraints)
441 cpuhw = &per_cpu(cpu_hw_events, cpu);
444 struct amd_nb *nb = cpuhw->amd_nb;
446 if (nb->nb_id == -1 || --nb->refcnt == 0)
449 cpuhw->amd_nb = NULL;
454 * When a PMC counter overflows, an NMI is used to process the event and
455 * reset the counter. NMI latency can result in the counter being updated
456 * before the NMI can run, which can result in what appear to be spurious
457 * NMIs. This function is intended to wait for the NMI to run and reset
458 * the counter to avoid possible unhandled NMI messages.
460 #define OVERFLOW_WAIT_COUNT 50
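/*
 * A sketch of why the sign bit is checked below, assuming the generic
 * x86 perf convention of arming a counter with the negated sampling
 * period: a 48-bit counter armed with period 0x1000 is written as
 * (-0x1000) & cntval_mask == 0xfffffffff000, i.e. bit 47 set. It counts
 * up, wraps on overflow (bit 47 now clear) and stays small until the
 * NMI handler reloads it, so bit (cntval_bits - 1) reading back as set
 * means the NMI has run and re-armed the counter.
 */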
462 static void amd_pmu_wait_on_overflow(int idx)
468 * Wait for the counter to be reset if it has overflowed. This loop
469 * should exit very, very quickly, but just in case, don't wait
472 for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
473 rdmsrl(x86_pmu_event_addr(idx), counter);
474 if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
477 /* Might be in IRQ context, so can't sleep */
482 static void amd_pmu_disable_all(void)
484 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
487 x86_pmu_disable_all();
490 * This shouldn't be called from NMI context, but add a safeguard here
491 * to return, since if we're in NMI context we can't wait for an NMI
492 * to reset an overflowed counter value.
498 * Check each counter for overflow and wait for it to be reset by the
499 * NMI if it has overflowed. This relies on the fact that all active
500 * counters are always enabled when this function is called and
501 * ARCH_PERFMON_EVENTSEL_INT is always set.
503 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
504 if (!test_bit(idx, cpuc->active_mask))
507 amd_pmu_wait_on_overflow(idx);
511 static void amd_pmu_disable_event(struct perf_event *event)
513 x86_pmu_disable_event(event);
516 * This can be called from NMI context (via x86_pmu_stop). The counter
517 * may have overflowed, but either way, we'll never see it get reset
518 * by the NMI if we're already in the NMI. And the NMI latency support
519 * below will take care of any pending NMI that might have been
520 * generated by the overflow.
525 amd_pmu_wait_on_overflow(event->hw.idx);
529 * Because of NMI latency, if multiple PMC counters are active or other sources
530 * of NMIs are received, the perf NMI handler can handle one or more overflowed
531 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
532 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
533 * back-to-back NMI support won't be active. This PMC handler needs to take into
534 * account that this can occur, otherwise this could result in unknown NMI
535 * messages being issued. Examples of this are PMC overflow while in the NMI
536 * handler when multiple PMCs are active or PMC overflow while handling some
537 * other source of an NMI.
539 * Attempt to mitigate this by using the number of active PMCs to determine
540 * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
541 * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
542 * number of active PMCs or 2. The value of 2 is used in case an NMI does not
543 * arrive at the LAPIC in time to be collapsed into an already pending NMI.
545 static int amd_pmu_handle_irq(struct pt_regs *regs)
547 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
551 * Obtain the active count before calling x86_pmu_handle_irq() since
552 * it is possible that x86_pmu_handle_irq() may make a counter
553 * inactive (through x86_pmu_stop).
555 active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
557 /* Process any counter overflows */
558 handled = x86_pmu_handle_irq(regs);
561 * If a counter was handled, record the number of possible remaining
562 * NMIs that can occur.
565 this_cpu_write(perf_nmi_counter,
566 min_t(unsigned int, 2, active));
571 if (!this_cpu_read(perf_nmi_counter))
574 this_cpu_dec(perf_nmi_counter);
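/*
 * Example of the mitigation (hypothetical timing): three counters are
 * active and overflow close together. The first NMI is handled by
 * x86_pmu_handle_irq() for all three overflows and perf_nmi_counter is
 * set to min(2, 3) == 2. The late NMIs raised for the other counters
 * then find nothing left to handle, but are still claimed as handled
 * while perf_nmi_counter is non-zero instead of being reported as
 * unknown NMIs.
 */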
579 static struct event_constraint *
580 amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
581 struct perf_event *event)
584 * if not NB event or no NB, then no constraints
586 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
587 return &unconstrained;
589 return __amd_get_nb_event_constraints(cpuc, event, NULL);
592 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
593 struct perf_event *event)
595 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
596 __amd_put_nb_event_constraints(cpuc, event);
599 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
600 PMU_FORMAT_ATTR(umask, "config:8-15" );
601 PMU_FORMAT_ATTR(edge, "config:18" );
602 PMU_FORMAT_ATTR(inv, "config:23" );
603 PMU_FORMAT_ATTR(cmask, "config:24-31" );
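/*
 * Usage example for the format strings above (event values are
 * hypothetical): a raw event given as event=0x1d6,umask=0x01 is split
 * so that bits 7:0 of the event value (0xd6) land in config:0-7, bits
 * 11:8 (0x1) land in config:32-35, and the unit mask lands in
 * config:8-15.
 */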
605 static struct attribute *amd_format_attr[] = {
606 &format_attr_event.attr,
607 &format_attr_umask.attr,
608 &format_attr_edge.attr,
609 &format_attr_inv.attr,
610 &format_attr_cmask.attr,
616 #define AMD_EVENT_TYPE_MASK 0x000000F0ULL
618 #define AMD_EVENT_FP 0x00000000ULL ... 0x00000010ULL
619 #define AMD_EVENT_LS 0x00000020ULL ... 0x00000030ULL
620 #define AMD_EVENT_DC 0x00000040ULL ... 0x00000050ULL
621 #define AMD_EVENT_CU 0x00000060ULL ... 0x00000070ULL
622 #define AMD_EVENT_IC_DE 0x00000080ULL ... 0x00000090ULL
623 #define AMD_EVENT_EX_LS 0x000000C0ULL
624 #define AMD_EVENT_DE 0x000000D0ULL
625 #define AMD_EVENT_NB 0x000000E0ULL ... 0x000000F0ULL
628 * AMD family 15h event code/PMC mappings:
630 * type = event_code & 0x0F0:
632 * 0x000 FP PERF_CTL[5:3]
633 * 0x010 FP PERF_CTL[5:3]
634 * 0x020 LS PERF_CTL[5:0]
635 * 0x030 LS PERF_CTL[5:0]
636 * 0x040 DC PERF_CTL[5:0]
637 * 0x050 DC PERF_CTL[5:0]
638 * 0x060 CU PERF_CTL[2:0]
639 * 0x070 CU PERF_CTL[2:0]
640 * 0x080 IC/DE PERF_CTL[2:0]
641 * 0x090 IC/DE PERF_CTL[2:0]
644 * 0x0C0 EX/LS PERF_CTL[5:0]
645 * 0x0D0 DE PERF_CTL[2:0]
646 * 0x0E0 NB NB_PERF_CTL[3:0]
647 * 0x0F0 NB NB_PERF_CTL[3:0]
651 * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
652 * 0x003 FP PERF_CTL[3]
653 * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
654 * 0x00B FP PERF_CTL[3]
655 * 0x00D FP PERF_CTL[3]
656 * 0x023 DE PERF_CTL[2:0]
657 * 0x02D LS PERF_CTL[3]
658 * 0x02E LS PERF_CTL[3,0]
659 * 0x031 LS PERF_CTL[2:0] (**)
660 * 0x043 CU PERF_CTL[2:0]
661 * 0x045 CU PERF_CTL[2:0]
662 * 0x046 CU PERF_CTL[2:0]
663 * 0x054 CU PERF_CTL[2:0]
664 * 0x055 CU PERF_CTL[2:0]
665 * 0x08F IC PERF_CTL[0]
666 * 0x187 DE PERF_CTL[0]
667 * 0x188 DE PERF_CTL[0]
668 * 0x0DB EX PERF_CTL[5:0]
669 * 0x0DC LS PERF_CTL[5:0]
670 * 0x0DD LS PERF_CTL[5:0]
671 * 0x0DE LS PERF_CTL[5:0]
672 * 0x0DF LS PERF_CTL[5:0]
673 * 0x1C0 EX PERF_CTL[5:3]
674 * 0x1D6 EX PERF_CTL[5:0]
675 * 0x1D8 EX PERF_CTL[5:0]
677 * (*) depending on the umask all FPU counters may be used
678 * (**) only one unitmask enabled at a time
681 static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
682 static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
683 static struct event_constraint amd_f15_PMC3 = EVENT_CONSTRAINT(0, 0x08, 0);
684 static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
685 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
686 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
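/*
 * Reading the masks above: bit i of the counter mask allows PERF_CTL[i],
 * so 0x07 is PERF_CTL[2:0] (amd_f15_PMC20), 0x38 is PERF_CTL[5:3]
 * (amd_f15_PMC53) and 0x3F is PERF_CTL[5:0] (amd_f15_PMC50).
 * amd_f15_PMC30 is declared with EVENT_CONSTRAINT_OVERLAP because its
 * mask 0x09 (PERF_CTL[3] and PERF_CTL[0]) is not a subset of any other
 * constraint of equal or higher weight, a case the generic scheduler
 * has to handle specially.
 */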
688 static struct event_constraint *
689 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
690 struct perf_event *event)
692 struct hw_perf_event *hwc = &event->hw;
693 unsigned int event_code = amd_get_event_code(hwc);
695 switch (event_code & AMD_EVENT_TYPE_MASK) {
697 switch (event_code) {
699 if (!(hwc->config & 0x0000F000ULL))
701 if (!(hwc->config & 0x00000F00ULL))
703 return &amd_f15_PMC3;
705 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
707 return &amd_f15_PMC3;
711 return &amd_f15_PMC3;
713 return &amd_f15_PMC53;
716 case AMD_EVENT_EX_LS:
717 switch (event_code) {
724 return &amd_f15_PMC20;
726 return &amd_f15_PMC3;
728 return &amd_f15_PMC30;
730 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
731 return &amd_f15_PMC20;
732 return &emptyconstraint;
734 return &amd_f15_PMC53;
736 return &amd_f15_PMC50;
739 case AMD_EVENT_IC_DE:
741 switch (event_code) {
745 return &amd_f15_PMC0;
746 case 0x0DB ... 0x0DF:
749 return &amd_f15_PMC50;
751 return &amd_f15_PMC20;
754 /* moved to uncore.c */
755 return &emptyconstraint;
757 return &emptyconstraint;
761 static ssize_t amd_event_sysfs_show(char *page, u64 config)
763 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
764 (config & AMD64_EVENTSEL_EVENT) >> 24;
766 return x86_event_sysfs_show(page, config, event);
769 static __initconst const struct x86_pmu amd_pmu = {
771 .handle_irq = amd_pmu_handle_irq,
772 .disable_all = amd_pmu_disable_all,
773 .enable_all = x86_pmu_enable_all,
774 .enable = x86_pmu_enable_event,
775 .disable = amd_pmu_disable_event,
776 .hw_config = amd_pmu_hw_config,
777 .schedule_events = x86_schedule_events,
778 .eventsel = MSR_K7_EVNTSEL0,
779 .perfctr = MSR_K7_PERFCTR0,
780 .addr_offset = amd_pmu_addr_offset,
781 .event_map = amd_pmu_event_map,
782 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
783 .num_counters = AMD64_NUM_COUNTERS,
785 .cntval_mask = (1ULL << 48) - 1,
787 /* use highest bit to detect overflow */
788 .max_period = (1ULL << 47) - 1,
789 .get_event_constraints = amd_get_event_constraints,
790 .put_event_constraints = amd_put_event_constraints,
792 .format_attrs = amd_format_attr,
793 .events_sysfs_show = amd_event_sysfs_show,
795 .cpu_prepare = amd_pmu_cpu_prepare,
796 .cpu_starting = amd_pmu_cpu_starting,
797 .cpu_dead = amd_pmu_cpu_dead,
799 .amd_nb_constraints = 1,
802 static int __init amd_core_pmu_init(void)
804 if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
807 switch (boot_cpu_data.x86) {
810 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
815 * In family 17h, there are no event constraints in the PMC hardware.
816 * We fall back to using the default amd_get_event_constraints.
821 /* Using default amd_get_event_constraints. */
824 pr_err("core perfctr but no constraints; unknown hardware!\n");
829 * If core performance counter extensions exist, we must use
830 * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
831 * amd_pmu_addr_offset().
833 x86_pmu.eventsel = MSR_F15H_PERF_CTL;
834 x86_pmu.perfctr = MSR_F15H_PERF_CTR;
835 x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
837 * AMD Core perfctr has separate MSRs for the NB events, see
838 * the amd/uncore.c driver.
840 x86_pmu.amd_nb_constraints = 0;
842 pr_cont("core perfctr, ");
846 __init int amd_pmu_init(void)
850 /* Performance-monitoring supported from K7 and later: */
851 if (boot_cpu_data.x86 < 6)
856 ret = amd_core_pmu_init();
860 if (num_possible_cpus() == 1) {
862 * No point in allocating data structures to serialize
863 * against other CPUs when there is only one CPU.
865 x86_pmu.amd_nb_constraints = 0;
868 /* Events are common for all AMDs */
869 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
870 sizeof(hw_cache_event_ids));
875 void amd_pmu_enable_virt(void)
877 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
879 cpuc->perf_ctr_virt_mask = 0;
881 /* Reload all events */
882 amd_pmu_disable_all();
883 x86_pmu_enable_all(0);
885 EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
887 void amd_pmu_disable_virt(void)
889 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
892 * We only mask out the Host-only bit so that host-only counting works
893 * when SVM is disabled. If someone sets up a guest-only counter when
894 * SVM is disabled, the Guest-only bit still gets set and the counter
895 * will not count anything.
897 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
899 /* Reload all events */
900 amd_pmu_disable_all();
901 x86_pmu_enable_all(0);
903 EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
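/*
 * How perf_ctr_virt_mask takes effect (a sketch, assuming the generic
 * x86 enable path masks these bits out of the event select before
 * writing the MSR): with SVM enabled the mask is 0, so the Host-only
 * and Guest-only bits are programmed as configured; with SVM disabled
 * the Host-only bit is stripped, so host-only events keep counting
 * while a guest-only event retains its GO bit and counts nothing.
 */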