// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This keeps the total size of the filter under 4k. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This normally is used by the RDPMC instruction.
 *      For instance the AMD RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index into
 *      the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
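
/*
 * Worked example of the three namespaces above, assuming an Intel CPU
 * with three fixed counters: fixed counter 0 is addressed by the msr
 * MSR_CORE_PERF_FIXED_CTR0, by the RDPMC idx 0x40000000 (bit 30 selects
 * the fixed-counter range), and by the global pmc INTEL_PMC_IDX_FIXED
 * (i.e. kvm_pmc.idx == 32).
 */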

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest-mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing a hlt
		 * instruction at the time of the vmexit; it won't re-enter
		 * guest mode until woken up, so we should wake it. That is
		 * impossible from NMI context, so do it from irq work
		 * instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
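
	/*
	 * Program the host counter so that it overflows exactly when the
	 * guest counter would. E.g. for a 48-bit counter currently at
	 * 0xFFFFFFFFFFF0, the expression below yields a period of 0x10,
	 * i.e. an overflow after 16 more events.
	 */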
	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;
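
	/*
	 * Honor the userspace-supplied event filter, if any: with an ALLOW
	 * filter the event must appear in events[] to be programmed, with a
	 * DENY filter it must not. Only the event select and unit mask
	 * (AMD64_RAW_EVENT_MASK_NB) are compared.
	 */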
	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							       event_select,
							       unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
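
/*
 * ctrl is the 4-bit per-counter slice of IA32_FIXED_CTR_CTRL: bit 0
 * enables counting in ring 0, bit 1 in rings 1-3, and bit 3 requests a
 * PMI on overflow. E.g. ctrl == 0xb counts in all rings and raises a
 * PMI when the counter overflows.
 */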
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);
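
/*
 * Handle a pending KVM_REQ_PMU request: reprogram every counter whose
 * bit is set in reprogram_pmi, typically because it was flagged by the
 * overflow handlers above.
 */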
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* Check whether idx is a valid index for accessing the PMU via RDPMC. */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boot_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boot_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}
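
/*
 * Bit 31 of the RDPMC index is the "fast read" flag; when it is set,
 * only the low 32 bits of the counter are returned, hence the 32-bit
 * mask below.
 */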
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration. Generally called when the underlying
 * settings change (such as the guest's PMU CPUID leaves being updated),
 * which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}
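
/*
 * Install a PMU event filter for the VM. A minimal userspace sketch
 * (the fd and event code are hypothetical):
 *
 *	struct kvm_pmu_event_filter *f =
 *		calloc(1, sizeof(*f) + sizeof(__u64));
 *	f->action = KVM_PMU_EVENT_DENY;
 *	f->nevents = 1;
 *	f->events[0] = 0x4f2e;	// event select 0x2e, unit mask 0x4f
 *	ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
 */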
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
			   mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
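
	/*
	 * After the swap, "filter" holds the old filter; wait for any
	 * reader still dereferencing it under kvm->srcu (e.g.
	 * reprogram_gp_counter()) before it is freed below.
	 */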
	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}