/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	{ NULL }
};
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
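/*
 * Example (illustrative sketch, not referenced by the code): the user ABI
 * encodes a core register's ID from its offset in 32-bit words into
 * struct kvm_regs, so for X2:
 *
 *	u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *		 KVM_REG_ARM_CORE_REG(regs.regs[2]);
 *
 *	core_reg_offset_from_id(id)
 *		== offsetof(struct kvm_regs, regs.regs[2]) / sizeof(__u32)
 */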
static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) != size ||
	    !IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
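/*
 * Userspace sketch (illustrative; assumes an open vCPU fd "vcpu_fd" and
 * omits error handling): reading the guest PC through this path via
 * KVM_GET_ONE_REG.
 *
 *	__u64 pc;
 *	struct kvm_one_reg one_reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */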
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)

static bool vq_present(
	const u64 (*const vqs)[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)],
	unsigned int vq)
{
	return (*vqs)[vq_word(vq)] & vq_mask(vq);
}
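/*
 * Worked example: with SVE_VQ_MIN == 1, one bit per vector length is
 * packed into u64 words, lowest vq first. For vq == 3 (a 48-byte, i.e.
 * 384-bit, vector length):
 *
 *	vq_word(3) == (3 - 1) / 64 == 0
 *	vq_mask(3) == (u64)1 << ((3 - 1) % 64) == 0x4
 *
 * so vq 3 is bit 2 of vqs[0].
 */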
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[DIV_ROUND_UP(SVE_VQ_MAX - SVE_VQ_MIN + 1, 64)];

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(&vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * The requested set of vector lengths must match the host's
	 * exactly, up to the requested maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(&vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
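/*
 * Ordering sketch (illustrative, error handling omitted; "vls_reg" is a
 * hypothetical struct kvm_one_reg for KVM_REG_ARM64_SVE_VLS): userspace
 * must write the vector length set before finalizing SVE; afterwards
 * set_sve_vls() fails with -EPERM:
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &vls_reg);
 *	int feature = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */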
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
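/*
 * Decoding sketch: in the low bits of an SVE register ID, bits [4:0]
 * select the slice and bits [9:5] the register number. For example,
 * KVM_REG_ARM64_SVE_ZREG(2, 0) yields reg_num == 2 under SVE_REG_ID_MASK
 * and slice == 0 under SVE_REG_SLICE_MASK.
 */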
/*
 * Number of register slices required to cover each whole SVE register on
 * a vcpu.
 * NOTE: If you are tempted to modify this, you must also rework
 * sve_reg_to_region() to match:
 */
#define vcpu_sve_slices(vcpu) 1
/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};
/* Get sanitised bounds for user/kernel SVE register copy */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	/* Only the first slice ever exists, for now: */
	if ((reg->id & SVE_REG_SLICE_MASK) != 0)
		return -ENOENT;

	vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
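/*
 * Worked example (illustrative, assuming the signal-frame layout macros
 * from asm/sigcontext.h): for a vcpu whose sve_max_vl is 64 bytes
 * (vq == 4), a request for Z1 resolves to:
 *
 *	reqoffset == SVE_SIG_ZREG_OFFSET(4, 1) - SVE_SIG_REGS_OFFSET
 *		  == 1 * 4 * 16 == 64 bytes into sve_state
 *	reqlen    == KVM_SVE_ZREG_SIZE == 256 (the fixed 2048-bit user view)
 *	klen      == min(64, 256) == 64 bytes copied from kernel memory
 *	upad      == 256 - 64 == 192 bytes of zero padding in user memory
 */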
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Otherwise, reg is an architectural SVE register... */

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (sve_reg_to_region(&region, vcpu, reg))
		return -ENOENT;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Otherwise, reg is an architectural SVE register... */

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (sve_reg_to_region(&region, vcpu, reg))
		return -ENOENT;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		/*
		 * The KVM_REG_ARM64_SVE regs must be used instead of
		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
		 * SVE-enabled vcpus:
		 */
		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
			continue;

		if (uindices) {
			if (put_user(core_reg | i, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}
/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	/* Only the first slice ever exists, for now */
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}
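/*
 * Arithmetic check: with SVE_NUM_ZREGS == 32 and SVE_NUM_PREGS == 16,
 * a single-slice SVE vcpu reports 1 * (16 + 32 + 1) + 1 == 50 registers.
 */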
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	/* Only the first slice ever exists, for now */
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}
/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}
/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
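/*
 * Userspace sketch (illustrative, error handling omitted): the two
 * functions above back KVM_GET_REG_LIST, which is normally called twice,
 * once to size the list and once to fill it:
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);  // -E2BIG, sets probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);    // fills list->reg[]
 */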
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	default: break; /* fall through */
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	default: break; /* fall through */
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)
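/*
 * Userspace sketch (illustrative, error handling omitted): single-stepping
 * a guest is requested by setting the enable and single-step bits together:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */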
/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
			vcpu->arch.external_debug_state = dbg->arch;

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}