KVM: x86: PMU Event Filter
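
This patch adds a per-VM PMU event filter: userspace installs an allow-list or deny-list of events through the new KVM_SET_PMU_EVENT_FILTER vm ioctl, and reprogram_gp_counter() consults that list under SRCU before programming a general-purpose counter, so filtered events are dropped rather than handed to perf.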
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index dd745b58ffd8a2f22115073f7790b98cf2e40c94..9d92c4d3cd44979d599dfba0ee88caf5c9ec81a3 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -22,6 +22,9 @@
 #include "lapic.h"
 #include "pmu.h"
 
+/* This keeps the total size of the filter under 4k. */
+#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63
+
 /* NOTE:
  * - Each perf counter is defined as "struct kvm_pmc";
  * - There are two types of perf counters: general purpose (gp) and fixed.
@@ -144,6 +147,10 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
        unsigned config, type = PERF_TYPE_RAW;
        u8 event_select, unit_mask;
+       struct kvm *kvm = pmc->vcpu->kvm;
+       struct kvm_pmu_event_filter *filter;
+       int i;
+       bool allow_event = true;
 
        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");
@@ -155,6 +162,22 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;
 
+       filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
+       if (filter) {
+               for (i = 0; i < filter->nevents; i++)
+                       if (filter->events[i] ==
+                           (eventsel & AMD64_RAW_EVENT_MASK_NB))
+                               break;
+               if (filter->action == KVM_PMU_EVENT_ALLOW &&
+                   i == filter->nevents)
+                       allow_event = false;
+               if (filter->action == KVM_PMU_EVENT_DENY &&
+                   i < filter->nevents)
+                       allow_event = false;
+       }
+       if (!allow_event)
+               return;
+
        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 
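
The loop above does a first-match lookup on the event-select and unit-mask bits (the field AMD64_RAW_EVENT_MASK_NB covers), and an absent filter (the NULL case) allows everything, preserving pre-patch behaviour. As an editorial sketch only, the same decision written as a pure predicate; filter_allows and match_mask are hypothetical names, with match_mask standing in for AMD64_RAW_EVENT_MASK_NB:

    /*
     * Sketch only: mirrors the allow/deny decision above.
     * match_mask is a stand-in for AMD64_RAW_EVENT_MASK_NB.
     */
    static bool filter_allows(const struct kvm_pmu_event_filter *filter,
                              u64 eventsel, u64 match_mask)
    {
            int i;

            for (i = 0; i < filter->nevents; i++)
                    if (filter->events[i] == (eventsel & match_mask))
                            break;

            if (filter->action == KVM_PMU_EVENT_ALLOW)
                    return i < filter->nevents;  /* only listed events run */

            /* KVM_PMU_EVENT_DENY: everything runs except listed events */
            return i == filter->nevents;
    }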
@@ -351,3 +374,43 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
 {
        kvm_pmu_reset(vcpu);
 }
+
+int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
+{
+       struct kvm_pmu_event_filter tmp, *filter;
+       size_t size;
+       int r;
+
+       if (copy_from_user(&tmp, argp, sizeof(tmp)))
+               return -EFAULT;
+
+       if (tmp.action != KVM_PMU_EVENT_ALLOW &&
+           tmp.action != KVM_PMU_EVENT_DENY)
+               return -EINVAL;
+
+       if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
+               return -E2BIG;
+
+       size = struct_size(filter, events, tmp.nevents);
+       filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
+       if (!filter)
+               return -ENOMEM;
+
+       r = -EFAULT;
+       if (copy_from_user(filter, argp, size))
+               goto cleanup;
+
+       /* Ensure nevents can't be changed between the user copies. */
+       *filter = tmp;
+
+       mutex_lock(&kvm->lock);
+       rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
+                          mutex_is_locked(&kvm->lock));
+       mutex_unlock(&kvm->lock);
+
+       synchronize_srcu_expedited(&kvm->srcu);
+       r = 0;
+cleanup:
+       kfree(filter);
+       return r;
+}
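
Two details worth noting: rcu_swap_protected() stores the old filter pointer back into the local filter variable, so the single kfree() at cleanup frees the previous filter on success and the fresh allocation on the -EFAULT path; and synchronize_srcu_expedited() guarantees every vCPU has dropped its srcu_dereference() of the old filter before that kfree() runs. Below is a minimal userspace sketch of driving the ioctl, assuming kernel headers that carry the uapi additions from this series (struct kvm_pmu_event_filter, KVM_PMU_EVENT_DENY, KVM_SET_PMU_EVENT_FILTER); deny_one_event is a hypothetical helper:

    /* Sketch only: deny a single raw event on an existing VM fd. */
    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static int deny_one_event(int vm_fd, unsigned long long raw_event)
    {
            struct kvm_pmu_event_filter *f;
            size_t size = sizeof(*f) + sizeof(f->events[0]);
            int r;

            f = calloc(1, size);
            if (!f)
                    return -1;

            f->action = KVM_PMU_EVENT_DENY;
            f->nevents = 1;
            f->events[0] = raw_event; /* event select and unit mask bits */

            r = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
            free(f);
            return r;
    }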