// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        /* Walk back from this counter to the enclosing vcpu */
        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

        return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}
/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
        return select_idx & 0x1;
}
/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;

        return pmc;
}
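/*
 * Example: counters pair up as (0,1), (2,3), ..., with bit (idx >> 1) of
 * the 'chained' bitmap tracking each pair. For a chained pair of counters
 * 4 and 5, the even counter 4 is canonical and owns the perf event, so a
 * request against counter 5 resolves to pmc - 1, i.e. counter 4.
 */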
/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

        /* The CHAIN event is always programmed on the odd (high) counter */
        select_idx |= 0x1;

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}
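/*
 * Example: if the guest programs PMEVTYPER3_EL0 with the CHAIN event
 * (0x1E), this helper returns true for both select_idx 2 and 3, because
 * the check is always made against the odd (high) counter of the pair.
 */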
/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is the value of the counter register plus
         * the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}
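/*
 * Example: for a chained pair whose low register holds 0xFFFFFFFF and
 * whose high register holds 0x1, the combined value is 0x1FFFFFFFF; any
 * count accumulated by the backing perf event is then added on top.
 */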
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
                counter = lower_32_bits(counter);

        return counter;
}
/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}
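/*
 * Example: the write is applied as a signed delta against the effective
 * value. If the register holds 0x30 and the perf event has counted 0x50
 * (effective value 0x80), a guest write of 0x100 leaves the register at
 * 0x30 + (0x100 - 0x80) = 0xB0, so the next read again yields 0x100 once
 * the perf count is added back in.
 */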
/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc)) {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
        }

        kvm_pmu_release_perf_event(pmc);
}
/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
                pmu->pmc[i].idx = i;
        }

        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}
/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
}
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
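/*
 * Example: with PMCR_EL0.N = 6 event counters, the valid mask is
 * GENMASK(5, 0) | BIT(31) = 0x8000003F, i.e. bits 0-5 for the event
 * counters plus bit 31 for the cycle counter.
 */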
/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For high counters of chained events we must recreate the
                 * perf event with the long (64bit) attribute set.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("failed to enable perf event\n");
                }
        }
}
/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /*
                 * For high counters of chained events we must recreate the
                 * perf event with the long (64bit) attribute unset.
                 */
                if (kvm_pmu_pmc_is_chained(pmc) &&
                    kvm_pmu_idx_is_high_counter(i)) {
                        kvm_pmu_create_perf_event(vcpu, i);
                        continue;
                }

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }

        return reg;
}
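/*
 * Example: counter 2 contributes to the overflow status only if the PMU
 * is globally enabled (PMCR_EL0.E) and bit 2 is set in PMOVSSET_EL0,
 * PMCNTENSET_EL0, PMINTENSET_EL1 and the valid counter mask all at once.
 */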
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_arm_pmu_v3_ready(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}
/**
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU bit of the device irq bitmap for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}
/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}
/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}
/**
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }
}
/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        u64 type, enable, reg;

        if (val == 0)
                return;

        enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                if (!(val & BIT(i)))
                        continue;
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
                       & ARMV8_PMU_EVTYPE_EVENT;
                if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
                    && (enable & BIT(i))) {
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
                        if (!reg)
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}
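/*
 * Example: if counter 0 is enabled, programmed with SW_INCR (event 0x00)
 * and currently holds 0xFFFFFFFF, a guest write of 0x1 to PMSWINC wraps
 * the 32-bit value to 0 and sets bit 0 of PMOVSSET_EL0 to flag the
 * overflow.
 */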
/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        u64 mask;
        int i;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}
/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        /*
         * For chained counters the event type and filtering attributes are
         * obtained from the low/even counter. We also use this counter to
         * determine if the event is enabled/disabled.
         */
        pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

        /* Software increment event doesn't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
            pmc->idx != ARMV8_PMU_CYCLE_IDX)
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
                ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
                /* config1 must be set up before the event is created */
                if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
                        attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                        attr.sample_period = (-counter) & GENMASK(63, 0);
                else
                        attr.sample_period = (-counter) & GENMASK(31, 0);

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc);
        }

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}
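/*
 * Example: sample_period is the negated counter value truncated to the
 * counter width, i.e. the distance to the next overflow. For a 32-bit
 * counter holding 0xFFFFFF00, the period is (-0xFFFFFF00) & GENMASK(31, 0)
 * = 0x100, so kvm_pmu_perf_overflow() fires after 256 more increments.
 */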
/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
                if (!kvm_pmu_pmc_is_chained(pmc))
                        kvm_pmu_stop_counter(vcpu, pmc);

                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        } else {
                clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
        }
}
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = event_type;

        kvm_pmu_update_pmc_chained(vcpu, select_idx);
        kvm_pmu_create_perf_event(vcpu, select_idx);
}
bool kvm_arm_support_pmu_v3(void)
{
        /*
         * Check if HW_PERF_EVENTS are supported by checking the number of
         * hardware performance counters. This ensures the presence of a
         * physical PMU and that CONFIG_PERF_EVENTS is selected.
         */
        return (perf_num_counters() > 0);
}
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!vcpu->arch.pmu.created)
                return 0;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -EINVAL;

                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        kvm_pmu_vcpu_reset(vcpu);
        vcpu->arch.pmu.ready = true;

        return 0;
}
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (!kvm_arm_support_pmu_v3())
                return -ENODEV;

        if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        vcpu->arch.pmu.created = true;
        return 0;
}
/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}
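/*
 * Example: on a 4-vcpu VM, a PPI such as 23 is valid only if every
 * initialized vcpu uses 23, whereas an SPI such as 40 is valid only if no
 * other vcpu has already claimed 40.
 */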
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
                if (kvm_arm_support_pmu_v3() &&
                    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
                        return 0;
        }

        return -ENXIO;
}