1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/frame.h>
4 #include <linux/percpu.h>
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
16 static bool __read_mostly enable_shadow_vmcs = true;
17 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
19 static bool __read_mostly nested_early_check = false;
20 module_param(nested_early_check, bool, S_IRUGO);
23 * Hyper-V requires all of these, so mark them as supported even though
24 * they are just treated the same as all-context.
26 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
27 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
28 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
29 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
30 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
32 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
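/*
 * Note: bits 4:0 of IA32_VMX_MISC advertise the preemption timer rate: the
 * timer counts down by one each time bit N of the TSC changes, i.e. once
 * every 2^N TSC cycles.  With the emulated rate of 5 above, one timer tick
 * corresponds to 32 TSC cycles; see the conversion in
 * vmx_start_preemption_timer() below.
 */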
39 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
41 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
42 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
44 static u16 shadow_read_only_fields[] = {
45 #define SHADOW_FIELD_RO(x) x,
46 #include "vmcs_shadow_fields.h"
48 static int max_shadow_read_only_fields =
49 ARRAY_SIZE(shadow_read_only_fields);
51 static u16 shadow_read_write_fields[] = {
52 #define SHADOW_FIELD_RW(x) x,
53 #include "vmcs_shadow_fields.h"
55 static int max_shadow_read_write_fields =
56 ARRAY_SIZE(shadow_read_write_fields);
58 static void init_vmcs_shadow_fields(void)
62 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
63 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
65 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
66 u16 field = shadow_read_only_fields[i];
68 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
69 (i + 1 == max_shadow_read_only_fields ||
70 shadow_read_only_fields[i + 1] != field + 1))
71 pr_err("Missing field from shadow_read_only_field %x\n",
74 clear_bit(field, vmx_vmread_bitmap);
80 shadow_read_only_fields[j] = field;
83 max_shadow_read_only_fields = j;
85 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
86 u16 field = shadow_read_write_fields[i];
88 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
89 (i + 1 == max_shadow_read_write_fields ||
90 shadow_read_write_fields[i + 1] != field + 1))
91 pr_err("Missing field from shadow_read_write_field %x\n",
95 * PML and the preemption timer can be emulated, but the
96 * processor cannot vmwrite to fields that don't exist on the hardware, so such fields are dropped from the shadow list below.
100 case GUEST_PML_INDEX:
101 if (!cpu_has_vmx_pml())
104 case VMX_PREEMPTION_TIMER_VALUE:
105 if (!cpu_has_vmx_preemption_timer())
108 case GUEST_INTR_STATUS:
109 if (!cpu_has_vmx_apicv())
116 clear_bit(field, vmx_vmwrite_bitmap);
117 clear_bit(field, vmx_vmread_bitmap);
123 shadow_read_write_fields[j] = field;
126 max_shadow_read_write_fields = j;
130 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
131 * set the success or error code of an emulated VMX instruction (as specified
132 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated instruction.
135 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
137 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
138 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
139 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
140 return kvm_skip_emulated_instruction(vcpu);
143 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
145 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
146 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
147 X86_EFLAGS_SF | X86_EFLAGS_OF))
149 return kvm_skip_emulated_instruction(vcpu);
152 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
153 u32 vm_instruction_error)
155 struct vcpu_vmx *vmx = to_vmx(vcpu);
158 * failValid writes the error number to the current VMCS, which
159 * can't be done if there isn't a current VMCS.
161 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
162 return nested_vmx_failInvalid(vcpu);
164 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
165 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
166 X86_EFLAGS_SF | X86_EFLAGS_OF))
168 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
170 * We don't need to force a shadow sync because
171 * VM_INSTRUCTION_ERROR is not shadowed
173 return kvm_skip_emulated_instruction(vcpu);
176 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
178 /* TODO: do not simply reset the guest here. */
179 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
180 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
183 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
185 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
186 vmcs_write64(VMCS_LINK_POINTER, -1ull);
189 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
191 struct vcpu_vmx *vmx = to_vmx(vcpu);
193 if (!vmx->nested.hv_evmcs)
196 kunmap(vmx->nested.hv_evmcs_page);
197 kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
198 vmx->nested.hv_evmcs_vmptr = -1ull;
199 vmx->nested.hv_evmcs_page = NULL;
200 vmx->nested.hv_evmcs = NULL;
204 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
205 * just stops using VMX.
207 static void free_nested(struct kvm_vcpu *vcpu)
209 struct vcpu_vmx *vmx = to_vmx(vcpu);
211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
214 vmx->nested.vmxon = false;
215 vmx->nested.smm.vmxon = false;
216 free_vpid(vmx->nested.vpid02);
217 vmx->nested.posted_intr_nv = -1;
218 vmx->nested.current_vmptr = -1ull;
219 if (enable_shadow_vmcs) {
220 vmx_disable_shadow_vmcs(vmx);
221 vmcs_clear(vmx->vmcs01.shadow_vmcs);
222 free_vmcs(vmx->vmcs01.shadow_vmcs);
223 vmx->vmcs01.shadow_vmcs = NULL;
225 kfree(vmx->nested.cached_vmcs12);
226 kfree(vmx->nested.cached_shadow_vmcs12);
227 /* Unpin physical memory we referred to in the vmcs02 */
228 if (vmx->nested.apic_access_page) {
229 kvm_release_page_dirty(vmx->nested.apic_access_page);
230 vmx->nested.apic_access_page = NULL;
232 if (vmx->nested.virtual_apic_page) {
233 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
234 vmx->nested.virtual_apic_page = NULL;
236 if (vmx->nested.pi_desc_page) {
237 kunmap(vmx->nested.pi_desc_page);
238 kvm_release_page_dirty(vmx->nested.pi_desc_page);
239 vmx->nested.pi_desc_page = NULL;
240 vmx->nested.pi_desc = NULL;
243 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
245 nested_release_evmcs(vcpu);
247 free_loaded_vmcs(&vmx->nested.vmcs02);
250 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
252 struct vcpu_vmx *vmx = to_vmx(vcpu);
255 if (vmx->loaded_vmcs == vmcs)
260 vmx->loaded_vmcs = vmcs;
261 vmx_vcpu_load(vcpu, cpu);
264 vm_entry_controls_reset_shadow(vmx);
265 vm_exit_controls_reset_shadow(vmx);
266 vmx_segment_cache_clear(vmx);
270 * Ensure that the current vmcs of the logical processor is the
271 * vmcs01 of the vcpu before calling free_nested().
273 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
276 vmx_leave_nested(vcpu);
277 vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
282 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
283 struct x86_exception *fault)
285 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
286 struct vcpu_vmx *vmx = to_vmx(vcpu);
288 unsigned long exit_qualification = vcpu->arch.exit_qualification;
290 if (vmx->nested.pml_full) {
291 exit_reason = EXIT_REASON_PML_FULL;
292 vmx->nested.pml_full = false;
293 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
294 } else if (fault->error_code & PFERR_RSVD_MASK)
295 exit_reason = EXIT_REASON_EPT_MISCONFIG;
297 exit_reason = EXIT_REASON_EPT_VIOLATION;
299 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
300 vmcs12->guest_physical_address = fault->address;
303 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
305 WARN_ON(mmu_is_nested(vcpu));
307 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
308 kvm_init_shadow_ept_mmu(vcpu,
309 to_vmx(vcpu)->nested.msrs.ept_caps &
310 VMX_EPT_EXECUTE_ONLY_BIT,
311 nested_ept_ad_enabled(vcpu),
312 nested_ept_get_cr3(vcpu));
313 vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
314 vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
315 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
316 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
318 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
321 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
323 vcpu->arch.mmu = &vcpu->arch.root_mmu;
324 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
327 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
330 bool inequality, bit;
332 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
334 (error_code & vmcs12->page_fault_error_code_mask) !=
335 vmcs12->page_fault_error_code_match;
336 return inequality ^ bit;
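/*
 * Illustration: with page_fault_error_code_mask == 0 and
 * page_fault_error_code_match == 0, "inequality" is always false and the
 * result reduces to "bit", i.e. plain exception-bitmap behavior.  A non-zero
 * mask/match pair inverts the bitmap decision for #PFs whose error code does
 * not match, which is the architectural PFEC_MASK/PFEC_MATCH semantic this
 * helper emulates.
 */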
341 * KVM wants to inject the page faults it received into the guest. This function
342 * checks whether, in a nested guest, they need to be injected into L1 or L2.
344 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
346 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
347 unsigned int nr = vcpu->arch.exception.nr;
348 bool has_payload = vcpu->arch.exception.has_payload;
349 unsigned long payload = vcpu->arch.exception.payload;
351 if (nr == PF_VECTOR) {
352 if (vcpu->arch.exception.nested_apf) {
353 *exit_qual = vcpu->arch.apf.nested_apf_token;
356 if (nested_vmx_is_page_fault_vmexit(vmcs12,
357 vcpu->arch.exception.error_code)) {
358 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
361 } else if (vmcs12->exception_bitmap & (1u << nr)) {
362 if (nr == DB_VECTOR) {
364 payload = vcpu->arch.dr6;
365 payload &= ~(DR6_FIXED_1 | DR6_BT);
368 *exit_qual = payload;
378 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
379 struct x86_exception *fault)
381 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
383 WARN_ON(!is_guest_mode(vcpu));
385 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
386 !to_vmx(vcpu)->nested.nested_run_pending) {
387 vmcs12->vm_exit_intr_error_code = fault->error_code;
388 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
389 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
390 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
393 kvm_inject_page_fault(vcpu, fault);
397 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
399 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
402 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
403 struct vmcs12 *vmcs12)
405 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
408 if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
409 !page_address_valid(vcpu, vmcs12->io_bitmap_b))
415 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
416 struct vmcs12 *vmcs12)
418 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
421 if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
427 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
428 struct vmcs12 *vmcs12)
430 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
433 if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
440 * Check whether a write to the given MSR is intercepted in the L01 MSR bitmap.
442 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
444 unsigned long *msr_bitmap;
445 int f = sizeof(unsigned long);
447 if (!cpu_has_vmx_msr_bitmap())
450 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
453 return !!test_bit(msr, msr_bitmap + 0x800 / f);
454 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
456 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
463 * If an MSR is allowed by L0, we should check whether it is also allowed by L1.
464 * The corresponding bit will be cleared unless both L0 and L1 allow it.
466 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
467 unsigned long *msr_bitmap_nested,
470 int f = sizeof(unsigned long);
473 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
474 * have the write-low and read-high bitmap offsets the wrong way round.
475 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
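 *
 * For reference, the 4-KByte MSR bitmap is laid out as (byte offsets):
 *   0x000-0x3ff: read bitmap for MSRs 0x00000000-0x00001fff ("read-low")
 *   0x400-0x7ff: read bitmap for MSRs 0xc0000000-0xc0001fff ("read-high")
 *   0x800-0xbff: write bitmap for MSRs 0x00000000-0x00001fff ("write-low")
 *   0xc00-0xfff: write bitmap for MSRs 0xc0000000-0xc0001fff ("write-high")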
478 if (type & MSR_TYPE_R &&
479 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
481 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
483 if (type & MSR_TYPE_W &&
484 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
486 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
488 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
490 if (type & MSR_TYPE_R &&
491 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
493 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
495 if (type & MSR_TYPE_W &&
496 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
498 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
504 * Merge L0's and L1's MSR bitmaps; return false to indicate that
505 * we do not use the hardware MSR bitmap.
507 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
508 struct vmcs12 *vmcs12)
512 unsigned long *msr_bitmap_l1;
513 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
515 * pred_cmd & spec_ctrl are trying to verify two things:
517 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
518 * ensures that we do not accidentally generate an L02 MSR bitmap
519 * from the L12 MSR bitmap that is too permissive.
520 * 2. That L1 or L2s have actually used the MSR. This avoids
521 * unnecessary merging of the bitmap if the MSR is unused. This
522 * works properly because we only update the L01 MSR bitmap lazily.
523 * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
524 * updated to reflect this when L1 (or its L2s) actually write to the MSR.
527 bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
528 bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
530 /* Nothing to do if the MSR bitmap is not in use. */
531 if (!cpu_has_vmx_msr_bitmap() ||
532 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
535 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
536 !pred_cmd && !spec_ctrl)
539 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
540 if (is_error_page(page))
543 msr_bitmap_l1 = (unsigned long *)kmap(page);
544 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
546 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
547 * just lets the processor take the value from the virtual-APIC page;
548 * take those 256 bits directly from the L1 bitmap.
550 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
551 unsigned word = msr / BITS_PER_LONG;
552 msr_bitmap_l0[word] = msr_bitmap_l1[word];
553 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
556 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
557 unsigned word = msr / BITS_PER_LONG;
558 msr_bitmap_l0[word] = ~0;
559 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
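/*
 * In both loops above, "word" (msr / BITS_PER_LONG) indexes the read-low
 * bitmap; adding 0x800 / sizeof(long) longs moves 0x800 bytes forward to the
 * matching word of the write-low bitmap, so writes to the x2APIC MSR range
 * remain intercepted by default.
 */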
563 nested_vmx_disable_intercept_for_msr(
564 msr_bitmap_l1, msr_bitmap_l0,
565 X2APIC_MSR(APIC_TASKPRI),
568 if (nested_cpu_has_vid(vmcs12)) {
569 nested_vmx_disable_intercept_for_msr(
570 msr_bitmap_l1, msr_bitmap_l0,
571 X2APIC_MSR(APIC_EOI),
573 nested_vmx_disable_intercept_for_msr(
574 msr_bitmap_l1, msr_bitmap_l0,
575 X2APIC_MSR(APIC_SELF_IPI),
580 nested_vmx_disable_intercept_for_msr(
581 msr_bitmap_l1, msr_bitmap_l0,
583 MSR_TYPE_R | MSR_TYPE_W);
586 nested_vmx_disable_intercept_for_msr(
587 msr_bitmap_l1, msr_bitmap_l0,
592 kvm_release_page_clean(page);
597 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
598 struct vmcs12 *vmcs12)
600 struct vmcs12 *shadow;
603 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
604 vmcs12->vmcs_link_pointer == -1ull)
607 shadow = get_shadow_vmcs12(vcpu);
608 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
610 memcpy(shadow, kmap(page), VMCS12_SIZE);
613 kvm_release_page_clean(page);
616 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
617 struct vmcs12 *vmcs12)
619 struct vcpu_vmx *vmx = to_vmx(vcpu);
621 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
622 vmcs12->vmcs_link_pointer == -1ull)
625 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
626 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
630 * In nested virtualization, check if L1 has set
631 * VM_EXIT_ACK_INTR_ON_EXIT
633 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
635 return get_vmcs12(vcpu)->vm_exit_controls &
636 VM_EXIT_ACK_INTR_ON_EXIT;
639 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
641 return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
644 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
645 struct vmcs12 *vmcs12)
647 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
648 !page_address_valid(vcpu, vmcs12->apic_access_addr))
654 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
655 struct vmcs12 *vmcs12)
657 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
658 !nested_cpu_has_apic_reg_virt(vmcs12) &&
659 !nested_cpu_has_vid(vmcs12) &&
660 !nested_cpu_has_posted_intr(vmcs12))
664 * If virtualize x2apic mode is enabled,
665 * virtualize apic access must be disabled.
667 if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
668 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
672 * If virtual interrupt delivery is enabled,
673 * we must exit on external interrupts.
675 if (nested_cpu_has_vid(vmcs12) &&
676 !nested_exit_on_intr(vcpu))
680 * bits 15:8 should be zero in posted_intr_nv,
681 * the descriptor address has already been checked
682 * in nested_get_vmcs12_pages.
684 * bits 5:0 of posted_intr_desc_addr should be zero.
686 if (nested_cpu_has_posted_intr(vmcs12) &&
687 (!nested_cpu_has_vid(vmcs12) ||
688 !nested_exit_intr_ack_set(vcpu) ||
689 (vmcs12->posted_intr_nv & 0xff00) ||
690 (vmcs12->posted_intr_desc_addr & 0x3f) ||
691 (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
694 /* tpr shadow is needed by all apicv features. */
695 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
701 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
708 maxphyaddr = cpuid_maxphyaddr(vcpu);
709 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
710 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
716 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
717 struct vmcs12 *vmcs12)
719 if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
720 vmcs12->vm_exit_msr_load_addr) ||
721 nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
722 vmcs12->vm_exit_msr_store_addr))
728 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
729 struct vmcs12 *vmcs12)
731 if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
732 vmcs12->vm_entry_msr_load_addr))
738 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
739 struct vmcs12 *vmcs12)
741 if (!nested_cpu_has_pml(vmcs12))
744 if (!nested_cpu_has_ept(vmcs12) ||
745 !page_address_valid(vcpu, vmcs12->pml_address))
751 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
752 struct vmcs12 *vmcs12)
754 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
755 !nested_cpu_has_ept(vmcs12))
760 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
761 struct vmcs12 *vmcs12)
763 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
764 !nested_cpu_has_ept(vmcs12))
769 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
770 struct vmcs12 *vmcs12)
772 if (!nested_cpu_has_shadow_vmcs(vmcs12))
775 if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
776 !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
782 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
783 struct vmx_msr_entry *e)
785 /* x2APIC MSR accesses are not allowed */
786 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
788 if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
789 e->index == MSR_IA32_UCODE_REV)
791 if (e->reserved != 0)
796 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
797 struct vmx_msr_entry *e)
799 if (e->index == MSR_FS_BASE ||
800 e->index == MSR_GS_BASE ||
801 e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
802 nested_vmx_msr_check_common(vcpu, e))
807 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
808 struct vmx_msr_entry *e)
810 if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
811 nested_vmx_msr_check_common(vcpu, e))
817 * Load the guest's/host's MSR list at nested entry/exit.
818 * Returns 0 on success, or the index of the failing entry on failure.
820 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
823 struct vmx_msr_entry e;
826 msr.host_initiated = false;
827 for (i = 0; i < count; i++) {
828 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
830 pr_debug_ratelimited(
831 "%s cannot read MSR entry (%u, 0x%08llx)\n",
832 __func__, i, gpa + i * sizeof(e));
835 if (nested_vmx_load_msr_check(vcpu, &e)) {
836 pr_debug_ratelimited(
837 "%s check failed (%u, 0x%x, 0x%x)\n",
838 __func__, i, e.index, e.reserved);
843 if (kvm_set_msr(vcpu, &msr)) {
844 pr_debug_ratelimited(
845 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
846 __func__, i, e.index, e.value);
855 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
858 struct vmx_msr_entry e;
860 for (i = 0; i < count; i++) {
861 struct msr_data msr_info;
862 if (kvm_vcpu_read_guest(vcpu,
864 &e, 2 * sizeof(u32))) {
865 pr_debug_ratelimited(
866 "%s cannot read MSR entry (%u, 0x%08llx)\n",
867 __func__, i, gpa + i * sizeof(e));
870 if (nested_vmx_store_msr_check(vcpu, &e)) {
871 pr_debug_ratelimited(
872 "%s check failed (%u, 0x%x, 0x%x)\n",
873 __func__, i, e.index, e.reserved);
876 msr_info.host_initiated = false;
877 msr_info.index = e.index;
878 if (kvm_get_msr(vcpu, &msr_info)) {
879 pr_debug_ratelimited(
880 "%s cannot read MSR (%u, 0x%x)\n",
881 __func__, i, e.index);
884 if (kvm_vcpu_write_guest(vcpu,
885 gpa + i * sizeof(e) +
886 offsetof(struct vmx_msr_entry, value),
887 &msr_info.data, sizeof(msr_info.data))) {
888 pr_debug_ratelimited(
889 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
890 __func__, i, e.index, msr_info.data);
897 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
899 unsigned long invalid_mask;
901 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
902 return (val & invalid_mask) == 0;
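/*
 * Example: with a guest MAXPHYADDR of 36, invalid_mask is ~0ULL << 36, so any
 * CR3 value with bits 63:36 set is rejected as out of range.
 */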
906 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
907 * emulating VM entry into a guest with EPT enabled.
908 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
909 * is assigned to entry_failure_code on failure.
911 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
912 u32 *entry_failure_code)
914 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
915 if (!nested_cr3_valid(vcpu, cr3)) {
916 *entry_failure_code = ENTRY_FAIL_DEFAULT;
921 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
922 * must not be dereferenced.
924 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
926 if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
927 *entry_failure_code = ENTRY_FAIL_PDPTE;
934 kvm_mmu_new_cr3(vcpu, cr3, false);
936 vcpu->arch.cr3 = cr3;
937 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
939 kvm_init_mmu(vcpu, false);
945 * Returns true if KVM can configure the CPU to tag TLB entries
946 * populated by L2 differently than TLB entries populated by L1.
949 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
951 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
952 * with different VPID (L1 entries are tagged with vmx->vpid
953 * while L2 entries are tagged with vmx->nested.vpid02).
955 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
957 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
959 return nested_cpu_has_ept(vmcs12) ||
960 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
963 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
965 struct vcpu_vmx *vmx = to_vmx(vcpu);
967 return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
971 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
973 return fixed_bits_valid(control, low, high);
976 static inline u64 vmx_control_msr(u32 low, u32 high)
978 return low | ((u64)high << 32);
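/*
 * For the VMX control capability MSRs, the low 32 bits report the allowed
 * 0-settings (a 1 there means the control is "must-be-1") and the high 32
 * bits report the allowed 1-settings (a 0 there means the control is
 * "must-be-0"); packing them as low | high << 32 matches the architectural
 * MSR layout (SDM Vol. 3, Appendix A).
 */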
981 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
986 return (superset | subset) == superset;
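/*
 * Worked example: is_bitwise_subset(0xb, 0x9, -1ULL) evaluates
 * (0xb | 0x9) == 0xb, i.e. true, because every bit set in the subset is also
 * set in the superset.  The mask argument restricts the comparison to the
 * bits of interest, e.g. GENMASK_ULL(31, 0) to compare only the low dword.
 */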
989 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
991 const u64 feature_and_reserved =
992 /* feature (except bit 48; see below) */
993 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
995 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
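/*
 * For reference, in MSR_IA32_VMX_BASIC: bit 48 reports the 32-bit
 * physical-address limitation, bit 49 dual-monitor SMM support, bit 54
 * INS/OUTS instruction information and bit 55 the "true" control MSRs;
 * bit 31, bits 47:45 and bits 63:56 are reserved.
 */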
996 u64 vmx_basic = vmx->nested.msrs.basic;
998 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1002 * KVM does not emulate a version of VMX that constrains physical
1003 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1005 if (data & BIT_ULL(48))
1008 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1009 vmx_basic_vmcs_revision_id(data))
1012 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1015 vmx->nested.msrs.basic = data;
1020 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1025 switch (msr_index) {
1026 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1027 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1028 highp = &vmx->nested.msrs.pinbased_ctls_high;
1030 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1031 lowp = &vmx->nested.msrs.procbased_ctls_low;
1032 highp = &vmx->nested.msrs.procbased_ctls_high;
1034 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1035 lowp = &vmx->nested.msrs.exit_ctls_low;
1036 highp = &vmx->nested.msrs.exit_ctls_high;
1038 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1039 lowp = &vmx->nested.msrs.entry_ctls_low;
1040 highp = &vmx->nested.msrs.entry_ctls_high;
1042 case MSR_IA32_VMX_PROCBASED_CTLS2:
1043 lowp = &vmx->nested.msrs.secondary_ctls_low;
1044 highp = &vmx->nested.msrs.secondary_ctls_high;
1050 supported = vmx_control_msr(*lowp, *highp);
1052 /* Check must-be-1 bits are still 1. */
1053 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1056 /* Check must-be-0 bits are still 0. */
1057 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1061 *highp = data >> 32;
1065 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1067 const u64 feature_and_reserved_bits =
1069 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1070 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1072 GENMASK_ULL(13, 9) | BIT_ULL(31);
1075 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1076 vmx->nested.msrs.misc_high);
1078 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1081 if ((vmx->nested.msrs.pinbased_ctls_high &
1082 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1083 vmx_misc_preemption_timer_rate(data) !=
1084 vmx_misc_preemption_timer_rate(vmx_misc))
1087 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1090 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1093 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1096 vmx->nested.msrs.misc_low = data;
1097 vmx->nested.msrs.misc_high = data >> 32;
1100 * If L1 has read-only VM-exit information fields, use the
1101 * less permissive vmx_vmwrite_bitmap to specify write
1102 * permissions for the shadow VMCS.
1104 if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
1105 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
1110 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1112 u64 vmx_ept_vpid_cap;
1114 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1115 vmx->nested.msrs.vpid_caps);
1117 /* Every bit is either reserved or a feature bit. */
1118 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1121 vmx->nested.msrs.ept_caps = data;
1122 vmx->nested.msrs.vpid_caps = data >> 32;
1126 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1130 switch (msr_index) {
1131 case MSR_IA32_VMX_CR0_FIXED0:
1132 msr = &vmx->nested.msrs.cr0_fixed0;
1134 case MSR_IA32_VMX_CR4_FIXED0:
1135 msr = &vmx->nested.msrs.cr4_fixed0;
1142 * 1 bits (which indicate the bits that are "must-be-1" during VMX operation)
1143 * must be 1 in the restored value.
1145 if (!is_bitwise_subset(data, *msr, -1ULL))
1153 * Called when userspace is restoring VMX MSRs.
1155 * Returns 0 on success, non-0 otherwise.
1157 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1159 struct vcpu_vmx *vmx = to_vmx(vcpu);
1162 * Don't allow changes to the VMX capability MSRs while the vCPU
1163 * is in VMX operation.
1165 if (vmx->nested.vmxon)
1168 switch (msr_index) {
1169 case MSR_IA32_VMX_BASIC:
1170 return vmx_restore_vmx_basic(vmx, data);
1171 case MSR_IA32_VMX_PINBASED_CTLS:
1172 case MSR_IA32_VMX_PROCBASED_CTLS:
1173 case MSR_IA32_VMX_EXIT_CTLS:
1174 case MSR_IA32_VMX_ENTRY_CTLS:
1176 * The "non-true" VMX capability MSRs are generated from the
1177 * "true" MSRs, so we do not support restoring them directly.
1179 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1180 * should restore the "true" MSRs with the must-be-1 bits
1181 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1182 * DEFAULT SETTINGS".
1185 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1186 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1187 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1188 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1189 case MSR_IA32_VMX_PROCBASED_CTLS2:
1190 return vmx_restore_control_msr(vmx, msr_index, data);
1191 case MSR_IA32_VMX_MISC:
1192 return vmx_restore_vmx_misc(vmx, data);
1193 case MSR_IA32_VMX_CR0_FIXED0:
1194 case MSR_IA32_VMX_CR4_FIXED0:
1195 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1196 case MSR_IA32_VMX_CR0_FIXED1:
1197 case MSR_IA32_VMX_CR4_FIXED1:
1199 * These MSRs are generated based on the vCPU's CPUID, so we
1200 * do not support restoring them directly.
1203 case MSR_IA32_VMX_EPT_VPID_CAP:
1204 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1205 case MSR_IA32_VMX_VMCS_ENUM:
1206 vmx->nested.msrs.vmcs_enum = data;
1210 * The rest of the VMX capability MSRs do not support restore.
1216 /* Returns 0 on success, non-0 otherwise. */
1217 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1219 switch (msr_index) {
1220 case MSR_IA32_VMX_BASIC:
1221 *pdata = msrs->basic;
1223 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1224 case MSR_IA32_VMX_PINBASED_CTLS:
1225 *pdata = vmx_control_msr(
1226 msrs->pinbased_ctls_low,
1227 msrs->pinbased_ctls_high);
1228 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1229 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1231 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1232 case MSR_IA32_VMX_PROCBASED_CTLS:
1233 *pdata = vmx_control_msr(
1234 msrs->procbased_ctls_low,
1235 msrs->procbased_ctls_high);
1236 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1237 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1239 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1240 case MSR_IA32_VMX_EXIT_CTLS:
1241 *pdata = vmx_control_msr(
1242 msrs->exit_ctls_low,
1243 msrs->exit_ctls_high);
1244 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1245 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1247 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1248 case MSR_IA32_VMX_ENTRY_CTLS:
1249 *pdata = vmx_control_msr(
1250 msrs->entry_ctls_low,
1251 msrs->entry_ctls_high);
1252 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1253 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1255 case MSR_IA32_VMX_MISC:
1256 *pdata = vmx_control_msr(
1260 case MSR_IA32_VMX_CR0_FIXED0:
1261 *pdata = msrs->cr0_fixed0;
1263 case MSR_IA32_VMX_CR0_FIXED1:
1264 *pdata = msrs->cr0_fixed1;
1266 case MSR_IA32_VMX_CR4_FIXED0:
1267 *pdata = msrs->cr4_fixed0;
1269 case MSR_IA32_VMX_CR4_FIXED1:
1270 *pdata = msrs->cr4_fixed1;
1272 case MSR_IA32_VMX_VMCS_ENUM:
1273 *pdata = msrs->vmcs_enum;
1275 case MSR_IA32_VMX_PROCBASED_CTLS2:
1276 *pdata = vmx_control_msr(
1277 msrs->secondary_ctls_low,
1278 msrs->secondary_ctls_high);
1280 case MSR_IA32_VMX_EPT_VPID_CAP:
1281 *pdata = msrs->ept_caps |
1282 ((u64)msrs->vpid_caps << 32);
1284 case MSR_IA32_VMX_VMFUNC:
1285 *pdata = msrs->vmfunc_controls;
1295 * Copy the writable VMCS shadow fields back to the VMCS12, in case
1296 * they have been modified by the L1 guest. Note that the "read-only"
1297 * VM-exit information fields are actually writable if the vCPU is
1298 * configured to support "VMWRITE to any supported field in the VMCS."
1300 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1302 const u16 *fields[] = {
1303 shadow_read_write_fields,
1304 shadow_read_only_fields
1306 const int max_fields[] = {
1307 max_shadow_read_write_fields,
1308 max_shadow_read_only_fields
1311 unsigned long field;
1313 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1317 vmcs_load(shadow_vmcs);
1319 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1320 for (i = 0; i < max_fields[q]; i++) {
1321 field = fields[q][i];
1322 field_value = __vmcs_readl(field);
1323 vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
1326 * Skip the VM-exit information fields if they are read-only.
1328 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
1332 vmcs_clear(shadow_vmcs);
1333 vmcs_load(vmx->loaded_vmcs->vmcs);
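/*
 * Note the VMCS juggling above: the shadow VMCS is made current so that
 * __vmcs_readl() pulls field values out of it, then it is vmclear'd and the
 * vCPU's working VMCS is reloaded so that subsequent VMREAD/VMWRITE
 * operations target the right VMCS again.
 */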
1338 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1340 const u16 *fields[] = {
1341 shadow_read_write_fields,
1342 shadow_read_only_fields
1344 const int max_fields[] = {
1345 max_shadow_read_write_fields,
1346 max_shadow_read_only_fields
1349 unsigned long field;
1350 u64 field_value = 0;
1351 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1353 vmcs_load(shadow_vmcs);
1355 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1356 for (i = 0; i < max_fields[q]; i++) {
1357 field = fields[q][i];
1358 vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
1359 __vmcs_writel(field, field_value);
1363 vmcs_clear(shadow_vmcs);
1364 vmcs_load(vmx->loaded_vmcs->vmcs);
1367 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1369 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1370 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1372 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1373 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1374 vmcs12->guest_rip = evmcs->guest_rip;
1376 if (unlikely(!(evmcs->hv_clean_fields &
1377 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1378 vmcs12->guest_rsp = evmcs->guest_rsp;
1379 vmcs12->guest_rflags = evmcs->guest_rflags;
1380 vmcs12->guest_interruptibility_info =
1381 evmcs->guest_interruptibility_info;
1384 if (unlikely(!(evmcs->hv_clean_fields &
1385 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1386 vmcs12->cpu_based_vm_exec_control =
1387 evmcs->cpu_based_vm_exec_control;
1390 if (unlikely(!(evmcs->hv_clean_fields &
1391 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1392 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1395 if (unlikely(!(evmcs->hv_clean_fields &
1396 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1397 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1400 if (unlikely(!(evmcs->hv_clean_fields &
1401 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1402 vmcs12->vm_entry_intr_info_field =
1403 evmcs->vm_entry_intr_info_field;
1404 vmcs12->vm_entry_exception_error_code =
1405 evmcs->vm_entry_exception_error_code;
1406 vmcs12->vm_entry_instruction_len =
1407 evmcs->vm_entry_instruction_len;
1410 if (unlikely(!(evmcs->hv_clean_fields &
1411 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1412 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1413 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1414 vmcs12->host_cr0 = evmcs->host_cr0;
1415 vmcs12->host_cr3 = evmcs->host_cr3;
1416 vmcs12->host_cr4 = evmcs->host_cr4;
1417 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1418 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1419 vmcs12->host_rip = evmcs->host_rip;
1420 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1421 vmcs12->host_es_selector = evmcs->host_es_selector;
1422 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1423 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1424 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1425 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1426 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1427 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1430 if (unlikely(!(evmcs->hv_clean_fields &
1431 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1432 vmcs12->pin_based_vm_exec_control =
1433 evmcs->pin_based_vm_exec_control;
1434 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1435 vmcs12->secondary_vm_exec_control =
1436 evmcs->secondary_vm_exec_control;
1439 if (unlikely(!(evmcs->hv_clean_fields &
1440 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1441 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1442 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1445 if (unlikely(!(evmcs->hv_clean_fields &
1446 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1447 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1450 if (unlikely(!(evmcs->hv_clean_fields &
1451 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1452 vmcs12->guest_es_base = evmcs->guest_es_base;
1453 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1454 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1455 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1456 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1457 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1458 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1459 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1460 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1461 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1462 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1463 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1464 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1465 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1466 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1467 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1468 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1469 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1470 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1471 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1472 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1473 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1474 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1475 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1476 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1477 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1478 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1479 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1480 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1481 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1482 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1483 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1484 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1485 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1486 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1487 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1490 if (unlikely(!(evmcs->hv_clean_fields &
1491 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1492 vmcs12->tsc_offset = evmcs->tsc_offset;
1493 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1494 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1497 if (unlikely(!(evmcs->hv_clean_fields &
1498 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1499 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1500 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1501 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1502 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1503 vmcs12->guest_cr0 = evmcs->guest_cr0;
1504 vmcs12->guest_cr3 = evmcs->guest_cr3;
1505 vmcs12->guest_cr4 = evmcs->guest_cr4;
1506 vmcs12->guest_dr7 = evmcs->guest_dr7;
1509 if (unlikely(!(evmcs->hv_clean_fields &
1510 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1511 vmcs12->host_fs_base = evmcs->host_fs_base;
1512 vmcs12->host_gs_base = evmcs->host_gs_base;
1513 vmcs12->host_tr_base = evmcs->host_tr_base;
1514 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1515 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1516 vmcs12->host_rsp = evmcs->host_rsp;
1519 if (unlikely(!(evmcs->hv_clean_fields &
1520 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1521 vmcs12->ept_pointer = evmcs->ept_pointer;
1522 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1525 if (unlikely(!(evmcs->hv_clean_fields &
1526 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1527 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1528 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1529 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1530 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1531 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1532 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1533 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1534 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1535 vmcs12->guest_pending_dbg_exceptions =
1536 evmcs->guest_pending_dbg_exceptions;
1537 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1538 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1539 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1540 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1541 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1546 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1547 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1548 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1549 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1550 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1551 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1552 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1553 * vmcs12->page_fault_error_code_mask =
1554 * evmcs->page_fault_error_code_mask;
1555 * vmcs12->page_fault_error_code_match =
1556 * evmcs->page_fault_error_code_match;
1557 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1558 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1559 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1560 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1565 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1566 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1567 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1568 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1569 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1570 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1571 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1572 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1573 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1574 * vmcs12->exit_qualification = evmcs->exit_qualification;
1575 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1577 * Not present in struct vmcs12:
1578 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1579 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1580 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1581 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1587 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1589 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1590 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1593 * Should not be changed by KVM:
1595 * evmcs->host_es_selector = vmcs12->host_es_selector;
1596 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1597 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1598 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1599 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1600 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1601 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1602 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1603 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1604 * evmcs->host_cr0 = vmcs12->host_cr0;
1605 * evmcs->host_cr3 = vmcs12->host_cr3;
1606 * evmcs->host_cr4 = vmcs12->host_cr4;
1607 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1608 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1609 * evmcs->host_rip = vmcs12->host_rip;
1610 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1611 * evmcs->host_fs_base = vmcs12->host_fs_base;
1612 * evmcs->host_gs_base = vmcs12->host_gs_base;
1613 * evmcs->host_tr_base = vmcs12->host_tr_base;
1614 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1615 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1616 * evmcs->host_rsp = vmcs12->host_rsp;
1617 * sync_vmcs12() doesn't read these:
1618 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1619 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1620 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1621 * evmcs->ept_pointer = vmcs12->ept_pointer;
1622 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1623 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1624 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1625 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1626 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1627 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1628 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1629 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1630 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1631 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1632 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1633 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1634 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1635 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1636 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1637 * evmcs->page_fault_error_code_mask =
1638 * vmcs12->page_fault_error_code_mask;
1639 * evmcs->page_fault_error_code_match =
1640 * vmcs12->page_fault_error_code_match;
1641 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1642 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1643 * evmcs->tsc_offset = vmcs12->tsc_offset;
1644 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1645 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1646 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1647 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1648 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1649 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1650 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1651 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1653 * Not present in struct vmcs12:
1654 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1655 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1656 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1657 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1660 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1661 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1662 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1663 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1664 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1665 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1666 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1667 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1669 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1670 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1671 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1672 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1673 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1674 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1675 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1676 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1677 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1678 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1680 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1681 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1682 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1683 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1684 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1685 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1686 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1687 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1689 evmcs->guest_es_base = vmcs12->guest_es_base;
1690 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1691 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1692 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1693 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1694 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1695 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1696 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1697 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1698 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1700 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1701 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1703 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1704 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1705 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1706 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1708 evmcs->guest_pending_dbg_exceptions =
1709 vmcs12->guest_pending_dbg_exceptions;
1710 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1711 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1713 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1714 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1716 evmcs->guest_cr0 = vmcs12->guest_cr0;
1717 evmcs->guest_cr3 = vmcs12->guest_cr3;
1718 evmcs->guest_cr4 = vmcs12->guest_cr4;
1719 evmcs->guest_dr7 = vmcs12->guest_dr7;
1721 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1723 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1724 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1725 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1726 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1727 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1728 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1729 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1730 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1732 evmcs->exit_qualification = vmcs12->exit_qualification;
1734 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1735 evmcs->guest_rsp = vmcs12->guest_rsp;
1736 evmcs->guest_rflags = vmcs12->guest_rflags;
1738 evmcs->guest_interruptibility_info =
1739 vmcs12->guest_interruptibility_info;
1740 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1741 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1742 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1743 evmcs->vm_entry_exception_error_code =
1744 vmcs12->vm_entry_exception_error_code;
1745 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1747 evmcs->guest_rip = vmcs12->guest_rip;
1749 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1755 * This is the equivalent of the nested hypervisor executing the vmptrld instruction.
1758 static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
1761 struct vcpu_vmx *vmx = to_vmx(vcpu);
1762 struct hv_vp_assist_page assist_page;
1764 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1767 if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
1770 if (unlikely(!assist_page.enlighten_vmentry))
1773 if (unlikely(assist_page.current_nested_vmcs !=
1774 vmx->nested.hv_evmcs_vmptr)) {
1776 if (!vmx->nested.hv_evmcs)
1777 vmx->nested.current_vmptr = -1ull;
1779 nested_release_evmcs(vcpu);
1781 vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
1782 vcpu, assist_page.current_nested_vmcs);
1784 if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
1787 vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
1790 * Currently, KVM only supports eVMCS version 1
1791 * (== KVM_EVMCS_VERSION), and thus we expect the guest to set this
1792 * value in the first u32 field of the eVMCS, which specifies the eVMCS version number.
1795 * The guest should learn which eVMCS versions the host supports by
1796 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
1797 * expected to set this CPUID leaf according to the value
1798 * returned in vmcs_version from nested_enable_evmcs().
1800 * However, it turns out that Microsoft Hyper-V fails to comply
1801 * with its own invented interface: when Hyper-V uses eVMCS, it
1802 * just sets the first u32 field of the eVMCS to the revision_id specified
1803 * in MSR_IA32_VMX_BASIC, instead of the eVMCS version number,
1804 * which should be one of the supported versions specified in
1805 * CPUID.0x4000000A.EAX[0:15].
1807 * To work around this Hyper-V bug, we accept here either a supported
1808 * eVMCS version or the VMCS12 revision_id as valid values for the
1809 * first u32 field of the eVMCS.
1811 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1812 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1813 nested_release_evmcs(vcpu);
1817 vmx->nested.dirty_vmcs12 = true;
1819 * Because we keep L2 state for only one guest, the 'hv_clean_fields' mask
1820 * can't be used when we switch between guests. Reset it here for consistency.
1823 vmx->nested.hv_evmcs->hv_clean_fields &=
1824 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1825 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
1828 * Unlike normal vmcs12, enlightened vmcs12 is not fully
1829 * reloaded from guest's memory (read-only fields, fields not
1830 * present in struct hv_enlightened_vmcs, ...). Make sure there are no stale leftovers from a previously loaded VMCS.
1834 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1835 memset(vmcs12, 0, sizeof(*vmcs12));
1836 vmcs12->hdr.revision_id = VMCS12_REVISION;
1843 void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
1845 struct vcpu_vmx *vmx = to_vmx(vcpu);
1848 * hv_evmcs may end up not being mapped after migration (when
1849 * L2 was running); map it here to make sure vmcs12 changes are
1850 * properly reflected.
1852 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
1853 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
1855 if (vmx->nested.hv_evmcs) {
1856 copy_vmcs12_to_enlightened(vmx);
1857 /* All fields are clean */
1858 vmx->nested.hv_evmcs->hv_clean_fields |=
1859 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1861 copy_vmcs12_to_shadow(vmx);
1864 vmx->nested.need_vmcs12_sync = false;
1867 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
1869 struct vcpu_vmx *vmx =
1870 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
1872 vmx->nested.preemption_timer_expired = true;
1873 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
1874 kvm_vcpu_kick(&vmx->vcpu);
1876 return HRTIMER_NORESTART;
1879 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
1881 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
1882 struct vcpu_vmx *vmx = to_vmx(vcpu);
1885 * A timer value of zero is architecturally guaranteed to cause
1886 * a VMExit prior to executing any instructions in the guest.
1888 if (preemption_timeout == 0) {
1889 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
1893 if (vcpu->arch.virtual_tsc_khz == 0)
1896 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
1897 preemption_timeout *= 1000000;
1898 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
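/*
 * Worked example: a vmcs12 timer value of 1000 with the emulated rate of 5
 * corresponds to 1000 << 5 = 32000 TSC cycles; with a 2 GHz guest TSC
 * (virtual_tsc_khz == 2000000) that is 32000 * 1000000 / 2000000 = 16000 ns,
 * which is what gets programmed into the hrtimer below.
 */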
1899 hrtimer_start(&vmx->nested.preemption_timer,
1900 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
1903 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1905 if (vmx->nested.nested_run_pending &&
1906 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
1907 return vmcs12->guest_ia32_efer;
1908 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
1909 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
1911 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
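/*
 * In short: if this VM-entry loads IA32_EFER from vmcs12, use that value
 * verbatim; otherwise derive EFER from L1's current value, forcing LMA/LME
 * on or off to agree with the "IA-32e mode guest" VM-entry control.
 */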
1914 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1917 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1918 * according to L0's settings (vmcs12 is irrelevant here). Host
1919 * fields that come from L0 and are not constant, e.g. HOST_CR3,
1920 * will be set as needed prior to VMLAUNCH/VMRESUME.
1922 if (vmx->nested.vmcs02_initialized)
1924 vmx->nested.vmcs02_initialized = true;
1927 * We don't care what the EPTP value is; we just need to guarantee
1928 * it's valid so we don't get a false positive when doing early
1929 * consistency checks.
1931 if (enable_ept && nested_early_check)
1932 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1934 /* All VMFUNCs are currently emulated through L0 vmexits. */
1935 if (cpu_has_vmx_vmfunc())
1936 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1938 if (cpu_has_vmx_posted_intr())
1939 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1941 if (cpu_has_vmx_msr_bitmap())
1942 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
1945 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
1948 * Set the MSR load/store lists to match L0's settings. Only the
1949 * addresses are constant (for vmcs02); the counts can change based
1950 * on L2's behavior, e.g. switching to/from long mode.
1952 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1953 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
1954 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
1956 vmx_set_constant_host_state(vmx);
1959 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
1960 struct vmcs12 *vmcs12)
1962 prepare_vmcs02_constant_state(vmx);
1964 vmcs_write64(VMCS_LINK_POINTER, -1ull);
1967 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
1968 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
1970 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
1974 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1976 u32 exec_control, vmcs12_exec_ctrl;
1977 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
1979 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
1980 prepare_vmcs02_early_full(vmx, vmcs12);
1985 exec_control = vmcs12->pin_based_vm_exec_control;
1987 /* Preemption timer setting is computed directly in vmx_vcpu_run. */
1988 exec_control |= vmcs_config.pin_based_exec_ctrl;
1989 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1990 vmx->loaded_vmcs->hv_timer_armed = false;
1992 /* Posted interrupts setting is only taken from vmcs12. */
1993 if (nested_cpu_has_posted_intr(vmcs12)) {
1994 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
1995 vmx->nested.pi_pending = false;
1997 exec_control &= ~PIN_BASED_POSTED_INTR;
1999 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
2004 exec_control = vmx_exec_control(vmx); /* L0's desires */
2005 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2006 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2007 exec_control &= ~CPU_BASED_TPR_SHADOW;
2008 exec_control |= vmcs12->cpu_based_vm_exec_control;
2011 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
2012 * nested_get_vmcs12_pages can't fix it up, the illegal value
2013 * will result in a VM entry failure.
2015 if (exec_control & CPU_BASED_TPR_SHADOW) {
2016 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2017 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2019 #ifdef CONFIG_X86_64
2020 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2021 CPU_BASED_CR8_STORE_EXITING;
2026 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2027 * for I/O port accesses.
2029 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2030 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2031 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
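/*
 * E.g. if vmcs12 sets CPU_BASED_HLT_EXITING but not I/O bitmaps, the
 * merged value keeps HLT exiting on L1's behalf while still forcing
 * CPU_BASED_UNCOND_IO_EXITING, since a vmexit is always required for
 * L2's I/O port accesses (see the comment above).
 */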
2034 * SECONDARY EXEC CONTROLS
2036 if (cpu_has_secondary_exec_ctrls()) {
2037 exec_control = vmx->secondary_exec_control;
2039 /* Take the following fields only from vmcs12 */
2040 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2041 SECONDARY_EXEC_ENABLE_INVPCID |
2042 SECONDARY_EXEC_RDTSCP |
2043 SECONDARY_EXEC_XSAVES |
2044 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2045 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2046 SECONDARY_EXEC_ENABLE_VMFUNC);
2047 if (nested_cpu_has(vmcs12,
2048 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2049 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2050 ~SECONDARY_EXEC_ENABLE_PML;
2051 exec_control |= vmcs12_exec_ctrl;
2054 /* VMCS shadowing for L2 is emulated for now */
2055 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2057 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2058 vmcs_write16(GUEST_INTR_STATUS,
2059 vmcs12->guest_intr_status);
2062 * Write an illegal value to APIC_ACCESS_ADDR. Later,
2063 * nested_get_vmcs12_pages will either fix it up or
2064 * remove the VM execution control.
2066 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
2067 vmcs_write64(APIC_ACCESS_ADDR, -1ull);
2069 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2070 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2072 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2078 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2079 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2080 * on the related bits (if supported by the CPU) in the hope that
2081 * we can avoid VMWrites during vmx_set_efer().
2083 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2084 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2085 if (cpu_has_load_ia32_efer()) {
2086 if (guest_efer & EFER_LMA)
2087 exec_control |= VM_ENTRY_IA32E_MODE;
2088 if (guest_efer != host_efer)
2089 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2091 vm_entry_controls_init(vmx, exec_control);
2096 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2097 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2098 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2100 exec_control = vmx_vmexit_ctrl();
2101 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2102 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2103 vm_exit_controls_init(vmx, exec_control);
2106 * Conceptually we want to copy the PML address and index from
2107 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
2108 * since we always flush the log on each vmexit and never change
2109 * the PML address (once set), this happens to be equivalent to
2110 * simply resetting the index in vmcs02.
2113 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
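/*
 * The PML index counts down from PML_ENTITY_NUM - 1 (511) as the CPU
 * logs dirty GPAs, and a page-modification-log-full exit is taken once
 * it underflows, so resetting it here gives L2 a fresh log on every
 * VMEnter.
 */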
2116 * Interrupt/Exception Fields
2118 if (vmx->nested.nested_run_pending) {
2119 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2120 vmcs12->vm_entry_intr_info_field);
2121 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2122 vmcs12->vm_entry_exception_error_code);
2123 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2124 vmcs12->vm_entry_instruction_len);
2125 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2126 vmcs12->guest_interruptibility_info);
2127 vmx->loaded_vmcs->nmi_known_unmasked =
2128 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2130 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2134 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2136 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2138 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2139 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2140 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2141 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2142 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2143 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2144 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2145 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2146 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2147 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2148 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2149 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2150 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2151 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2152 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2153 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2154 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2155 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2156 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2157 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2158 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2159 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2160 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2161 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2162 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2163 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2164 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2165 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2166 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2167 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2168 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2169 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2170 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2171 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2172 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2173 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2176 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2177 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2178 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2179 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2180 vmcs12->guest_pending_dbg_exceptions);
2181 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2182 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2185 * L1 may access the L2's PDPTR, so save them to construct vmcs12.
2189 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2190 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2191 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2192 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2196 if (nested_cpu_has_xsaves(vmcs12))
2197 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2200 * Whether page-faults are trapped is determined by a combination of
2201 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2202 * If enable_ept, L0 doesn't care about page faults and we should
2203 * set all of these to L1's desires. However, if !enable_ept, L0 does
2204 * care about (at least some) page faults, and because it is not easy
2205 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2206 * to exit on each and every L2 page fault. This is done by setting
2207 * MASK=MATCH=0 and (see below) EB.PF=1.
2208 * Note that below we don't need special code to set EB.PF beyond the
2209 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2210 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2211 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2213 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2214 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2215 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2216 enable_ept ? vmcs12->page_fault_error_code_match : 0);
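/*
 * Concretely: a #PF in L2 triggers a vmexit when EB.PF is 1 and
 * (error_code & PFEC_MASK) == PFEC_MATCH (the sense inverts when the
 * equality fails), so MASK = MATCH = 0 together with EB.PF = 1 makes
 * every L2 page fault exit, which is what the !enable_ept case needs.
 */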
2218 if (cpu_has_vmx_apicv()) {
2219 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2220 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2221 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2222 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2225 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2226 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2228 set_cr4_guest_host_mask(vmx);
2230 if (kvm_mpx_supported()) {
2231 if (vmx->nested.nested_run_pending &&
2232 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2233 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2235 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2240 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2241 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2242 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2243 * guest in a way that will both be appropriate to L1's requests, and our
2244 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2245 * function also has additional necessary side-effects, like setting various
2246 * vcpu->arch fields.
2247 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2248 * is assigned to entry_failure_code on failure.
2250 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2251 u32 *entry_failure_code)
2253 struct vcpu_vmx *vmx = to_vmx(vcpu);
2254 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2256 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
2257 prepare_vmcs02_full(vmx, vmcs12);
2258 vmx->nested.dirty_vmcs12 = false;
2262 * First, the fields that are shadowed. This must be kept in sync
2263 * with vmcs_shadow_fields.h.
2265 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2266 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2267 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2268 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2271 if (vmx->nested.nested_run_pending &&
2272 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2273 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2274 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2276 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2277 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2279 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2281 vmx->nested.preemption_timer_expired = false;
2282 if (nested_cpu_has_preemption_timer(vmcs12))
2283 vmx_start_preemption_timer(vcpu);
2285 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2286 * bitwise-or of what L1 wants to trap for L2, and what we want to
2287 * trap. Note that CR0.TS also needs updating - we do this later.
2289 update_exception_bitmap(vcpu);
2290 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2291 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2293 if (vmx->nested.nested_run_pending &&
2294 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2295 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2296 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2297 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2298 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2301 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2303 if (kvm_has_tsc_control)
2304 decache_tsc_multiplier(vmx);
2308 * There is no direct mapping between vpid02 and vpid12, the
2309 * vpid02 is per-vCPU for L0 and reused while the value of
2310 * vpid12 is changed w/ one invvpid during nested vmentry.
2311 * The vpid12 is allocated by L1 for L2, so it will not
2312 * influence the global bitmap (for vpid01 and vpid02 allocation)
2313 * even if L1 spawns a lot of nested vCPUs.
2315 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2316 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2317 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2318 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2322 * If L1 uses EPT, then L0 needs to execute INVEPT on
2323 * EPTP02 instead of EPTP01. Therefore, delay TLB
2324 * flush until vmcs02->eptp is fully updated by
2325 * KVM_REQ_LOAD_CR3. Note that this assumes
2326 * KVM_REQ_TLB_FLUSH is evaluated after
2327 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2329 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2333 if (nested_cpu_has_ept(vmcs12))
2334 nested_ept_init_mmu_context(vcpu);
2335 else if (nested_cpu_has2(vmcs12,
2336 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2337 vmx_flush_tlb(vcpu, true);
2340 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2341 * bits which we consider mandatory enabled.
2342 * The CR0_READ_SHADOW is what L2 should have expected to read given
2343 * the specifications by L1; It's not enough to take
2344 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may
2345 * have more bits than L1 expected.
2347 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2348 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2350 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2351 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2353 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2354 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2355 vmx_set_efer(vcpu, vcpu->arch.efer);
2358 * Guest state is invalid and unrestricted guest is disabled,
2359 * which means L1 attempted VMEntry to L2 with invalid state.
2362 if (vmx->emulation_required) {
2363 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2367 /* Load the guest CR3, whether L2 is using EPT or shadow page tables. */
2368 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2369 entry_failure_code))
2373 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2375 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
2376 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
2380 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2382 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
2383 nested_cpu_has_virtual_nmis(vmcs12))
2386 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
2387 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
2393 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2395 struct vcpu_vmx *vmx = to_vmx(vcpu);
2396 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2398 /* Check for memory type validity */
2399 switch (address & VMX_EPTP_MT_MASK) {
2400 case VMX_EPTP_MT_UC:
2401 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
2404 case VMX_EPTP_MT_WB:
2405 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
2412 /* only a 4-level page-walk length is valid */
2413 if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
2416 /* Reserved bits should not be set */
2417 if (address >> maxphyaddr || ((address >> 7) & 0x1f))
2420 /* AD, if set, should be supported */
2421 if (address & VMX_EPTP_AD_ENABLE_BIT) {
2422 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
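/*
 * EPTP layout being validated above: bits 2:0 = memory type, bits 5:3 =
 * page-walk length minus 1 (3 => a 4-level walk), bit 6 = enable
 * accessed/dirty flags, bits 11:7 reserved, upper bits = physical
 * address of the PML4 table. E.g. 0x1234000 | VMX_EPTP_MT_WB |
 * VMX_EPTP_PWL_4 is a valid write-back, 4-level EPTP on a CPU that
 * reports VMX_EPTP_WB_BIT.
 */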
2430 * Checks related to VM-Execution Control Fields
2432 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2433 struct vmcs12 *vmcs12)
2435 struct vcpu_vmx *vmx = to_vmx(vcpu);
2437 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2438 vmx->nested.msrs.pinbased_ctls_low,
2439 vmx->nested.msrs.pinbased_ctls_high) ||
2440 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2441 vmx->nested.msrs.procbased_ctls_low,
2442 vmx->nested.msrs.procbased_ctls_high))
2445 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2446 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2447 vmx->nested.msrs.secondary_ctls_low,
2448 vmx->nested.msrs.secondary_ctls_high))
2451 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2452 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2453 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2454 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2455 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2456 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2457 nested_vmx_check_nmi_controls(vmcs12) ||
2458 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2459 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2460 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2461 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2462 (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2465 if (nested_cpu_has_ept(vmcs12) &&
2466 !valid_ept_address(vcpu, vmcs12->ept_pointer))
2469 if (nested_cpu_has_vmfunc(vmcs12)) {
2470 if (vmcs12->vm_function_control &
2471 ~vmx->nested.msrs.vmfunc_controls)
2474 if (nested_cpu_has_eptp_switching(vmcs12)) {
2475 if (!nested_cpu_has_ept(vmcs12) ||
2476 !page_address_valid(vcpu, vmcs12->eptp_list_address))
2485 * Checks related to VM-Exit Control Fields
2487 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2488 struct vmcs12 *vmcs12)
2490 struct vcpu_vmx *vmx = to_vmx(vcpu);
2492 if (!vmx_control_verify(vmcs12->vm_exit_controls,
2493 vmx->nested.msrs.exit_ctls_low,
2494 vmx->nested.msrs.exit_ctls_high) ||
2495 nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
2502 * Checks related to VM-Entry Control Fields
2504 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2505 struct vmcs12 *vmcs12)
2507 struct vcpu_vmx *vmx = to_vmx(vcpu);
2509 if (!vmx_control_verify(vmcs12->vm_entry_controls,
2510 vmx->nested.msrs.entry_ctls_low,
2511 vmx->nested.msrs.entry_ctls_high))
2515 * From the Intel SDM, volume 3:
2516 * Fields relevant to VM-entry event injection must be set properly.
2517 * These fields are the VM-entry interruption-information field, the
2518 * VM-entry exception error code, and the VM-entry instruction length.
2520 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2521 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2522 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2523 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2524 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2525 bool should_have_error_code;
2526 bool urg = nested_cpu_has2(vmcs12,
2527 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2528 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2530 /* VM-entry interruption-info field: interruption type */
2531 if (intr_type == INTR_TYPE_RESERVED ||
2532 (intr_type == INTR_TYPE_OTHER_EVENT &&
2533 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2536 /* VM-entry interruption-info field: vector */
2537 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2538 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2539 (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2542 /* VM-entry interruption-info field: deliver error code */
2543 should_have_error_code =
2544 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2545 x86_exception_has_error_code(vector);
2546 if (has_error_code != should_have_error_code)
2549 /* VM-entry exception error code */
2550 if (has_error_code &&
2551 vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
2554 /* VM-entry interruption-info field: reserved bits */
2555 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
2558 /* VM-entry instruction length */
2559 switch (intr_type) {
2560 case INTR_TYPE_SOFT_EXCEPTION:
2561 case INTR_TYPE_SOFT_INTR:
2562 case INTR_TYPE_PRIV_SW_EXCEPTION:
2563 if ((vmcs12->vm_entry_instruction_len > 15) ||
2564 (vmcs12->vm_entry_instruction_len == 0 &&
2565 !nested_cpu_has_zero_length_injection(vcpu)))
2570 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2577 * Checks related to Host Control Registers and MSRs
2579 static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
2580 struct vmcs12 *vmcs12)
2584 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2585 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2586 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2589 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2590 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2591 * the values of the LMA and LME bits in the field must each be that of
2592 * the host address-space size VM-exit control.
2594 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2595 ia32e = (vmcs12->vm_exit_controls &
2596 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
2597 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2598 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2599 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
2607 * Checks related to Guest Non-register State
2609 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2611 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2612 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2618 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
2619 struct vmcs12 *vmcs12)
2621 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2622 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2623 nested_check_vm_entry_controls(vcpu, vmcs12))
2624 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2626 if (nested_check_host_control_regs(vcpu, vmcs12))
2627 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
2629 if (nested_check_guest_non_reg_state(vmcs12))
2630 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2635 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2636 struct vmcs12 *vmcs12)
2640 struct vmcs12 *shadow;
2642 if (vmcs12->vmcs_link_pointer == -1ull)
2645 if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
2648 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
2649 if (is_error_page(page))
2653 shadow = kmap(page);
2654 if (shadow->hdr.revision_id != VMCS12_REVISION ||
2655 shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
2658 kvm_release_page_clean(page);
2662 static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
2663 struct vmcs12 *vmcs12,
2668 *exit_qual = ENTRY_FAIL_DEFAULT;
2670 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
2671 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
2674 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2675 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2680 * If the load IA32_EFER VM-entry control is 1, the following checks
2681 * are performed on the field for the IA32_EFER MSR:
2682 * - Bits reserved in the IA32_EFER MSR must be 0.
2683 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2684 * the IA-32e mode guest VM-entry control. It must also be identical
2685 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to CR0.PG) is 1.
2688 if (to_vmx(vcpu)->nested.nested_run_pending &&
2689 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2690 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2691 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2692 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2693 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
2694 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
2698 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2699 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
2700 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
2706 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2708 struct vcpu_vmx *vmx = to_vmx(vcpu);
2709 unsigned long cr3, cr4;
2712 if (!nested_early_check)
2715 if (vmx->msr_autoload.host.nr)
2716 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2717 if (vmx->msr_autoload.guest.nr)
2718 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2722 vmx_prepare_switch_to_guest(vcpu);
2725 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2726 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
2727 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2728 * there is no need to preserve other bits or save/restore the field.
2730 vmcs_writel(GUEST_RFLAGS, 0);
2732 cr3 = __get_current_cr3_fast();
2733 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2734 vmcs_writel(HOST_CR3, cr3);
2735 vmx->loaded_vmcs->host_state.cr3 = cr3;
2738 cr4 = cr4_read_shadow();
2739 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2740 vmcs_writel(HOST_CR4, cr4);
2741 vmx->loaded_vmcs->host_state.cr4 = cr4;
2745 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
2746 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2748 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
2749 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2751 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
2753 /* Check if vmlaunch or vmresume is needed */
2754 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
2757 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
2758 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
2759 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
2760 * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
2762 "call vmx_vmenter\n\t"
2765 : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
2766 : [HOST_RSP]"r"((unsigned long)HOST_RSP),
2767 [loaded_vmcs]"r"(vmx->loaded_vmcs),
2768 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
2769 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
2770 [wordsize]"i"(sizeof(ulong))
2776 if (vmx->msr_autoload.host.nr)
2777 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2778 if (vmx->msr_autoload.guest.nr)
2779 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2782 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
2783 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2788 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2791 if (hw_breakpoint_active())
2792 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2795 * A non-failing VMEntry means we somehow entered guest mode with
2796 * an illegal RIP, and that's just the tip of the iceberg. There
2797 * is no telling what memory has been modified or what state has
2798 * been exposed to unknown code. Hitting this all but guarantees
2799 * a (very critical) hardware issue.
2801 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2802 VMX_EXIT_REASONS_FAILED_VMENTRY));
2807 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2808 struct vmcs12 *vmcs12);
2810 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2812 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2813 struct vcpu_vmx *vmx = to_vmx(vcpu);
2817 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2819 * Translate L1 physical address to host physical
2820 * address for vmcs02. Keep the page pinned, so this
2821 * physical address remains valid. We keep a reference
2822 * to it so we can release it later.
2824 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2825 kvm_release_page_dirty(vmx->nested.apic_access_page);
2826 vmx->nested.apic_access_page = NULL;
2828 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2830 * If translation failed, no matter: This feature asks
2831 * to exit when accessing the given address, and if it
2832 * can never be accessed, this feature won't do anything anyway.
2835 if (!is_error_page(page)) {
2836 vmx->nested.apic_access_page = page;
2837 hpa = page_to_phys(vmx->nested.apic_access_page);
2838 vmcs_write64(APIC_ACCESS_ADDR, hpa);
2840 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
2841 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
2845 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2846 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
2847 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
2848 vmx->nested.virtual_apic_page = NULL;
2850 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
2853 * If translation failed, VM entry will fail because
2854 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
2855 * Failing the vm entry is _not_ what the processor
2856 * does but it's basically the only possibility we
2857 * have. We could still enter the guest if CR8 load
2858 * exits are enabled, CR8 store exits are enabled, and
2859 * virtualize APIC access is disabled; in this case
2860 * the processor would never use the TPR shadow and we
2861 * could simply clear the bit from the execution
2862 * control. But such a configuration is useless, so
2863 * let's keep the code simple.
2865 if (!is_error_page(page)) {
2866 vmx->nested.virtual_apic_page = page;
2867 hpa = page_to_phys(vmx->nested.virtual_apic_page);
2868 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
2872 if (nested_cpu_has_posted_intr(vmcs12)) {
2873 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
2874 kunmap(vmx->nested.pi_desc_page);
2875 kvm_release_page_dirty(vmx->nested.pi_desc_page);
2876 vmx->nested.pi_desc_page = NULL;
2877 vmx->nested.pi_desc = NULL;
2878 vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
2880 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
2881 if (is_error_page(page))
2883 vmx->nested.pi_desc_page = page;
2884 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
2885 vmx->nested.pi_desc =
2886 (struct pi_desc *)((void *)vmx->nested.pi_desc +
2887 (unsigned long)(vmcs12->posted_intr_desc_addr &
2889 vmcs_write64(POSTED_INTR_DESC_ADDR,
2890 page_to_phys(vmx->nested.pi_desc_page) +
2891 (unsigned long)(vmcs12->posted_intr_desc_addr &
2894 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2895 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2896 CPU_BASED_USE_MSR_BITMAPS);
2898 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2899 CPU_BASED_USE_MSR_BITMAPS);
2903 * Intel's VMX Instruction Reference specifies a common set of prerequisites
2904 * for running VMX instructions (except VMXON, whose prerequisites are
2905 * slightly different). It also specifies what exception to inject otherwise.
2906 * Note that many of these exceptions have priority over VM exits, so they
2907 * don't have to be checked again here.
2909 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2911 if (!to_vmx(vcpu)->nested.vmxon) {
2912 kvm_queue_exception(vcpu, UD_VECTOR);
2916 if (vmx_get_cpl(vcpu)) {
2917 kvm_inject_gp(vcpu, 0);
2924 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2926 u8 rvi = vmx_get_rvi();
2927 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2929 return ((rvi & 0xf0) > (vppr & 0xf0));
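/*
 * RVI and VPPR are compared by priority class (bits 7:4). For example,
 * RVI = 0x61 vs. VPPR = 0x50 means a vector in class 6 is pending while
 * L1's PPR only masks classes <= 5, so an APICv interrupt is deliverable.
 */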
2932 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2933 struct vmcs12 *vmcs12);
2936 * If from_vmentry is false, this is being called from state restore (either RSM
2937 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
2940 * 0 - success, i.e. proceed with actual VMEnter
2941 * 1 - consistency check VMExit
2942 * -1 - consistency check VMFail
2944 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2946 struct vcpu_vmx *vmx = to_vmx(vcpu);
2947 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2948 bool evaluate_pending_interrupts;
2949 u32 exit_reason = EXIT_REASON_INVALID_STATE;
2952 evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
2953 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
2954 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
2955 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
2957 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
2958 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
2959 if (kvm_mpx_supported() &&
2960 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2961 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
2963 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
2965 prepare_vmcs02_early(vmx, vmcs12);
2968 nested_get_vmcs12_pages(vcpu);
2970 if (nested_vmx_check_vmentry_hw(vcpu)) {
2971 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
2975 if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
2976 goto vmentry_fail_vmexit;
2979 enter_guest_mode(vcpu);
2980 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2981 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
2983 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
2984 goto vmentry_fail_vmexit_guest_mode;
2987 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
2988 exit_qual = nested_vmx_load_msr(vcpu,
2989 vmcs12->vm_entry_msr_load_addr,
2990 vmcs12->vm_entry_msr_load_count);
2992 goto vmentry_fail_vmexit_guest_mode;
2995 * The MMU is not initialized to point at the right entities yet and
2996 * "get pages" would need to read data from the guest (i.e. we will
2997 * need to perform gpa to hpa translation). Request a call
2998 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
2999 * have already been set at vmentry time and should not be reset.
3001 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
3005 * If L1 had a pending IRQ/NMI until it executed
3006 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3007 * disallowed (e.g. interrupts disabled), L0 needs to
3008 * evaluate if this pending event should cause an exit from L2
3009 * to L1 or delivered directly to L2 (e.g. in case L1 doesn't
3010 * intercept EXTERNAL_INTERRUPT).
3012 * Usually this would be handled by the processor noticing an
3013 * IRQ/NMI window request, or checking RVI during evaluation of
3014 * pending virtual interrupts. However, this setting was done
3015 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3016 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3018 if (unlikely(evaluate_pending_interrupts))
3019 kvm_make_request(KVM_REQ_EVENT, vcpu);
3022 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3023 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3024 * returned as far as L1 is concerned. It will only return (and set
3025 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3030 * A failed consistency check that leads to a VMExit during L1's
3031 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3032 * 26.7 "VM-entry failures during or after loading guest state".
3034 vmentry_fail_vmexit_guest_mode:
3035 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3036 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3037 leave_guest_mode(vcpu);
3039 vmentry_fail_vmexit:
3040 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3045 load_vmcs12_host_state(vcpu, vmcs12);
3046 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3047 vmcs12->exit_qualification = exit_qual;
3048 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3049 vmx->nested.need_vmcs12_sync = true;
3054 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3055 * for running an L2 nested guest.
3057 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3059 struct vmcs12 *vmcs12;
3060 struct vcpu_vmx *vmx = to_vmx(vcpu);
3061 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3064 if (!nested_vmx_check_permission(vcpu))
3067 if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
3070 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3071 return nested_vmx_failInvalid(vcpu);
3073 vmcs12 = get_vmcs12(vcpu);
3076 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3077 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3078 * rather than RFLAGS.ZF, and no error number is stored to the
3079 * VM-instruction error field.
3081 if (vmcs12->hdr.shadow_vmcs)
3082 return nested_vmx_failInvalid(vcpu);
3084 if (vmx->nested.hv_evmcs) {
3085 copy_enlightened_to_vmcs12(vmx);
3086 /* Enlightened VMCS doesn't have launch state */
3087 vmcs12->launch_state = !launch;
3088 } else if (enable_shadow_vmcs) {
3089 copy_shadow_to_vmcs12(vmx);
3093 * The nested entry process starts with enforcing various prerequisites
3094 * on vmcs12 as required by the Intel SDM, acting appropriately when
3095 * they fail: As the SDM explains, some conditions should cause the
3096 * instruction to fail, while others will cause the instruction to seem
3097 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3098 * To speed up the normal (success) code path, we should avoid checking
3099 * for misconfigurations which will anyway be caught by the processor
3100 * when using the merged vmcs02.
3102 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3103 return nested_vmx_failValid(vcpu,
3104 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3106 if (vmcs12->launch_state == launch)
3107 return nested_vmx_failValid(vcpu,
3108 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3109 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3111 ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
3113 return nested_vmx_failValid(vcpu, ret);
3116 * We're finally done with prerequisite checking, and can start with
3119 vmx->nested.nested_run_pending = 1;
3120 ret = nested_vmx_enter_non_root_mode(vcpu, true);
3121 vmx->nested.nested_run_pending = !ret;
3125 return nested_vmx_failValid(vcpu,
3126 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3128 /* Hide L1D cache contents from the nested guest. */
3129 vmx->vcpu.arch.l1tf_flush_l1d = true;
3132 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3133 * also be used as part of restoring nVMX state for
3134 * snapshot restore (migration).
3136 * In this flow, it is assumed that vmcs12 cache was
3137 * transferred as part of captured nVMX state and should
3138 * therefore not be read from guest memory (which may not
3139 * exist on destination host yet).
3141 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3144 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3145 * awakened by event injection or by an NMI-window VM-exit or
3146 * by an interrupt-window VM-exit, halt the vcpu.
3148 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3149 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3150 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
3151 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
3152 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3153 vmx->nested.nested_run_pending = 0;
3154 return kvm_vcpu_halt(vcpu);
3160 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3161 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3162 * This function returns the new value we should put in vmcs12.guest_cr0.
3163 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3164 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3165 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3166 * didn't trap the bit, because if L1 did, so would L0).
3167 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3168 * been modified by L2, and L1 knows it. So just leave the old value of
3169 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3170 * isn't relevant, because if L0 traps this bit it can set it to anything.
3171 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3172 * changed these bits, and therefore they need to be updated, but L0
3173 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3174 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3176 static inline unsigned long
3177 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3180 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3181 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3182 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3183 vcpu->arch.cr0_guest_owned_bits));
3186 static inline unsigned long
3187 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3190 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3191 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3192 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3193 vcpu->arch.cr4_guest_owned_bits));
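/*
 * Worked example for the CR0 case: suppose vmcs12->cr0_guest_host_mask
 * covers only CR0.TS while L0 additionally intercepts CR0.PG. Then TS
 * comes from vmcs12->guest_cr0 (case 2), PG from vmcs02's
 * CR0_READ_SHADOW (case 3), and every bit neither L0 nor L1 intercepts
 * comes straight from the hardware GUEST_CR0 (case 1).
 */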
3196 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3197 struct vmcs12 *vmcs12)
3202 if (vcpu->arch.exception.injected) {
3203 nr = vcpu->arch.exception.nr;
3204 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3206 if (kvm_exception_is_soft(nr)) {
3207 vmcs12->vm_exit_instruction_len =
3208 vcpu->arch.event_exit_inst_len;
3209 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3211 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3213 if (vcpu->arch.exception.has_error_code) {
3214 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3215 vmcs12->idt_vectoring_error_code =
3216 vcpu->arch.exception.error_code;
3219 vmcs12->idt_vectoring_info_field = idt_vectoring;
3220 } else if (vcpu->arch.nmi_injected) {
3221 vmcs12->idt_vectoring_info_field =
3222 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3223 } else if (vcpu->arch.interrupt.injected) {
3224 nr = vcpu->arch.interrupt.nr;
3225 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3227 if (vcpu->arch.interrupt.soft) {
3228 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3229 vmcs12->vm_entry_instruction_len =
3230 vcpu->arch.event_exit_inst_len;
3232 idt_vectoring |= INTR_TYPE_EXT_INTR;
3234 vmcs12->idt_vectoring_info_field = idt_vectoring;
3239 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3241 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3245 * Don't need to mark the APIC access page dirty; it is never
3246 * written to by the CPU during APIC virtualization.
3249 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3250 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3251 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3254 if (nested_cpu_has_posted_intr(vmcs12)) {
3255 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3256 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3260 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3262 struct vcpu_vmx *vmx = to_vmx(vcpu);
3267 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3270 vmx->nested.pi_pending = false;
3271 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3274 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3275 if (max_irr != 256) {
3276 vapic_page = kmap(vmx->nested.virtual_apic_page);
3277 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3278 vapic_page, &max_irr);
3279 kunmap(vmx->nested.virtual_apic_page);
3281 status = vmcs_read16(GUEST_INTR_STATUS);
3282 if ((u8)max_irr > ((u8)status & 0xff)) {
3284 status |= (u8)max_irr;
3285 vmcs_write16(GUEST_INTR_STATUS, status);
3289 nested_mark_vmcs12_pages_dirty(vcpu);
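/*
 * Example of the flow above: L1 posts vector 0x51 by setting PIR bit
 * 0x51 and ON in L2's posted-interrupt descriptor; here that bit is
 * copied into the virtual-APIC page's IRR and, if 0x51 is higher than
 * the current RVI, GUEST_INTR_STATUS is raised so the interrupt can be
 * delivered to L2 on the next VMEnter.
 */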
3292 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3293 unsigned long exit_qual)
3295 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3296 unsigned int nr = vcpu->arch.exception.nr;
3297 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3299 if (vcpu->arch.exception.has_error_code) {
3300 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3301 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3304 if (kvm_exception_is_soft(nr))
3305 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3307 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3309 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3310 vmx_get_nmi_mask(vcpu))
3311 intr_info |= INTR_INFO_UNBLOCK_NMI;
3313 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3316 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3318 struct vcpu_vmx *vmx = to_vmx(vcpu);
3319 unsigned long exit_qual;
3320 bool block_nested_events =
3321 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3323 if (vcpu->arch.exception.pending &&
3324 nested_vmx_check_exception(vcpu, &exit_qual)) {
3325 if (block_nested_events)
3327 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3331 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3332 vmx->nested.preemption_timer_expired) {
3333 if (block_nested_events)
3335 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3339 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3340 if (block_nested_events)
3342 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3343 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3344 INTR_INFO_VALID_MASK, 0);
3346 * The NMI-triggered VM exit counts as injection:
3347 * clear this one and block further NMIs.
3349 vcpu->arch.nmi_pending = 0;
3350 vmx_set_nmi_mask(vcpu, true);
3354 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3355 nested_exit_on_intr(vcpu)) {
3356 if (block_nested_events)
3358 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3362 vmx_complete_nested_posted_interrupt(vcpu);
3366 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3369 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3372 if (ktime_to_ns(remaining) <= 0)
3375 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3376 do_div(value, 1000000);
3377 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
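/*
 * This is the inverse of the scaling in vmx_start_preemption_timer():
 * e.g. 16000 ns remaining at an illustrative virtual_tsc_khz of 1000000
 * converts back to (16000 * 1000000 / 1000000) >> 5 = 500 timer ticks
 * reported in vmcs12.
 */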
3381 * Update the guest state fields of vmcs12 to reflect changes that
3382 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3383 * VM-entry controls is also updated, since this is really a guest state field.)
3386 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3388 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3389 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3391 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3392 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
3393 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3395 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3396 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3397 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3398 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3399 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3400 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3401 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3402 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3403 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3404 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3405 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3406 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3407 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3408 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3409 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3410 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3411 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3412 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3413 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3414 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3415 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3416 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3417 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3418 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3419 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3420 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3421 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3422 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3423 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3424 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3425 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3426 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3427 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3428 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3429 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3430 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3432 vmcs12->guest_interruptibility_info =
3433 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3434 vmcs12->guest_pending_dbg_exceptions =
3435 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3436 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3437 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3439 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3441 if (nested_cpu_has_preemption_timer(vmcs12) &&
3442 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3443 vmcs12->vmx_preemption_timer_value =
3444 vmx_get_preemption_timer_value(vcpu);
3447 * In some cases (usually, nested EPT), L2 is allowed to change its
3448 * own CR3 without exiting. If it has changed it, we must keep it.
3449 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3450 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3452 * Additionally, restore L2's PDPTR to vmcs12.
3455 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3456 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3457 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3458 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3459 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3462 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3464 if (nested_cpu_has_vid(vmcs12))
3465 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3467 vmcs12->vm_entry_controls =
3468 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3469 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3471 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
3472 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3473 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3476 /* TODO: These cannot have changed unless we have MSR bitmaps and
3477 * the relevant bit asks not to trap the change */
3478 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
3479 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
3480 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3481 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3482 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3483 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3484 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3485 if (kvm_mpx_supported())
3486 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3490 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3491 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3492 * and this function updates it to reflect the changes to the guest state while
3493 * L2 was running (and perhaps made some exits which were handled directly by L0
3494 * without going back to L1), and to reflect the exit reason.
3495 * Note that we do not have to copy here all VMCS fields, just those that
3496 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3497 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3498 * which already writes to vmcs12 directly.
3500 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3501 u32 exit_reason, u32 exit_intr_info,
3502 unsigned long exit_qualification)
3504 /* update guest state fields: */
3505 sync_vmcs12(vcpu, vmcs12);
3507 /* update exit information fields: */
3509 vmcs12->vm_exit_reason = exit_reason;
3510 vmcs12->exit_qualification = exit_qualification;
3511 vmcs12->vm_exit_intr_info = exit_intr_info;
3513 vmcs12->idt_vectoring_info_field = 0;
3514 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3515 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3517 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3518 vmcs12->launch_state = 1;
3520 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3521 * instead of reading the real value. */
3522 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3525 * Transfer the event that L0 or L1 may have wanted to inject into
3526 * L2 to IDT_VECTORING_INFO_FIELD.
3528 vmcs12_save_pending_event(vcpu, vmcs12);
3531 * According to spec, there's no need to store the guest's
3532 * MSRs if the exit is due to a VM-entry failure that occurs
3533 * during or after loading the guest state. Since this exit
3534 * does not fall in that category, we need to save the MSRs.
3536 if (nested_vmx_store_msr(vcpu,
3537 vmcs12->vm_exit_msr_store_addr,
3538 vmcs12->vm_exit_msr_store_count))
3539 nested_vmx_abort(vcpu,
3540 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3544 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3545 * preserved above and would only end up incorrectly in L1.
3547 vcpu->arch.nmi_injected = false;
3548 kvm_clear_exception_queue(vcpu);
3549 kvm_clear_interrupt_queue(vcpu);
3553 * A part of what we need to do when the nested L2 guest exits and we want to
3554 * run its L1 parent, is to reset L1's guest state to the host state specified
3556 * This function is to be called not only on normal nested exit, but also on
3557 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3558 * Failures During or After Loading Guest State").
3559 * This function should be called when the active VMCS is L1's (vmcs01).
3561 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3562 struct vmcs12 *vmcs12)
3564 struct kvm_segment seg;
3565 u32 entry_failure_code;
3567 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3568 vcpu->arch.efer = vmcs12->host_ia32_efer;
3569 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3570 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3572 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3573 vmx_set_efer(vcpu, vcpu->arch.efer);
3575 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
3576 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
3577 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3578 vmx_set_interrupt_shadow(vcpu, 0);
3581 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3582 * actually changed, because vmx_set_cr0 refers to efer set above.
3584 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3585 * (KVM doesn't change it);
3587 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3588 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3590 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3591 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3592 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3594 nested_ept_uninit_mmu_context(vcpu);
3597 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3598 * couldn't have changed.
3600 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3601 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3604 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3607 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3608 * VMEntry/VMExit. Thus, no need to flush TLB.
3610 * If vmcs12 doesn't use VPID, L1 expects TLB to be
3611 * flushed on every VMEntry/VMExit.
3613 * Otherwise, we can preserve TLB entries as long as we are
3614 * able to tag L1 TLB entries differently than L2 TLB entries.
3616 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3617 * and therefore we request the TLB flush to happen only after VMCS EPTP
3618 * has been set by KVM_REQ_LOAD_CR3.
3621 (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3622 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3625 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3626 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3627 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3628 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3629 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3630 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3631 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3633 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3634 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3635 vmcs_write64(GUEST_BNDCFGS, 0);
3637 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3638 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3639 vcpu->arch.pat = vmcs12->host_ia32_pat;
3641 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3642 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3643 vmcs12->host_ia32_perf_global_ctrl);
3645 /* Set L1 segment info according to Intel SDM
3646 27.5.2 Loading Host Segment and Descriptor-Table Registers */
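/*
 * Per that SDM section the host segments are restored with fixed attributes:
 * the data and code segments are flat with a 4 GiB limit and base 0 (FS and
 * GS keep the bases from the vmcs12 host-state fields), the selectors come
 * from vmcs12->host_*_selector, and TR is rebuilt from host_tr_base and
 * host_tr_selector, which is what the initializers below spell out.
 */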
3647 seg = (struct kvm_segment) {
3649 .limit = 0xFFFFFFFF,
3650 .selector = vmcs12->host_cs_selector,
3656 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3660 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3661 seg = (struct kvm_segment) {
3663 .limit = 0xFFFFFFFF,
3670 seg.selector = vmcs12->host_ds_selector;
3671 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3672 seg.selector = vmcs12->host_es_selector;
3673 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3674 seg.selector = vmcs12->host_ss_selector;
3675 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3676 seg.selector = vmcs12->host_fs_selector;
3677 seg.base = vmcs12->host_fs_base;
3678 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3679 seg.selector = vmcs12->host_gs_selector;
3680 seg.base = vmcs12->host_gs_base;
3681 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3682 seg = (struct kvm_segment) {
3683 .base = vmcs12->host_tr_base,
3685 .selector = vmcs12->host_tr_selector,
3689 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3691 kvm_set_dr(vcpu, 7, 0x400);
3692 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3694 if (cpu_has_vmx_msr_bitmap())
3695 vmx_update_msr_bitmap(vcpu);
3697 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3698 vmcs12->vm_exit_msr_load_count))
3699 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3702 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3704 struct shared_msr_entry *efer_msr;
3707 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3708 return vmcs_read64(GUEST_IA32_EFER);
3710 if (cpu_has_load_ia32_efer())
3713 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3714 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3715 return vmx->msr_autoload.guest.val[i].value;
3718 efer_msr = find_msr_entry(vmx, MSR_EFER);
3720 return efer_msr->data;
3725 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3727 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3728 struct vcpu_vmx *vmx = to_vmx(vcpu);
3729 struct vmx_msr_entry g, h;
3730 struct msr_data msr;
3734 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3736 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3738 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3739 * as vmcs01.GUEST_DR7 contains a userspace defined value
3740 * and vcpu->arch.dr7 is not squirreled away before the
3741 * nested VMENTER (not worth adding a variable in nested_vmx).
3743 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3744 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3746 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3750 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3751 * handle a variety of side effects to KVM's software model.
3753 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3755 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3756 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3758 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3759 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3761 nested_ept_uninit_mmu_context(vcpu);
3762 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3763 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3766 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3767 * from vmcs01 (if necessary). The PDPTRs are not loaded on
3768 * VMFail, like everything else we just need to ensure our
3769 * software model is up-to-date.
3771 ept_save_pdptrs(vcpu);
3773 kvm_mmu_reset_context(vcpu);
3775 if (cpu_has_vmx_msr_bitmap())
3776 vmx_update_msr_bitmap(vcpu);
3779 * This nasty bit of open coding is a compromise between blindly
3780 * loading L1's MSRs using the exit load lists (incorrect emulation
3781 * of VMFail), leaving the nested VM's MSRs in the software model
3782 * (incorrect behavior) and snapshotting the modified MSRs (too
3783 * expensive since the lists are unbound by hardware). For each
3784 * MSR that was (prematurely) loaded from the nested VMEntry load
3785 * list, reload it from the exit load list if it exists and differs
3786 * from the guest value. The intent is to stuff host state as
3787 * silently as possible, not to fully process the exit load list.
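/*
 * In outline, the loop below does, for each entry g in the VM-entry MSR-load
 * list: look for an entry h with the same MSR index in the VM-exit MSR-load
 * list; if one exists and h.value differs from g.value, load h.value back
 * via kvm_set_msr().
 */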
3789 msr.host_initiated = false;
3790 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3791 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3792 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3793 pr_debug_ratelimited(
3794 "%s read MSR index failed (%u, 0x%08llx)\n",
3799 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3800 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3801 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3802 pr_debug_ratelimited(
3803 "%s read MSR failed (%u, 0x%08llx)\n",
3807 if (h.index != g.index)
3809 if (h.value == g.value)
3812 if (nested_vmx_load_msr_check(vcpu, &h)) {
3813 pr_debug_ratelimited(
3814 "%s check failed (%u, 0x%x, 0x%x)\n",
3815 __func__, j, h.index, h.reserved);
3819 msr.index = h.index;
3821 if (kvm_set_msr(vcpu, &msr)) {
3822 pr_debug_ratelimited(
3823 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3824 __func__, j, h.index, h.value);
3833 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3837 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
3838 * and modify vmcs12 to make it see what it would expect to see there if
3839 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
3841 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3842 u32 exit_intr_info, unsigned long exit_qualification)
3844 struct vcpu_vmx *vmx = to_vmx(vcpu);
3845 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3847 /* trying to cancel vmlaunch/vmresume is a bug */
3848 WARN_ON_ONCE(vmx->nested.nested_run_pending);
3850 leave_guest_mode(vcpu);
3852 if (nested_cpu_has_preemption_timer(vmcs12))
3853 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
3855 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3856 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3858 if (likely(!vmx->fail)) {
3859 if (exit_reason == -1)
3860 sync_vmcs12(vcpu, vmcs12);
3861 else
3862 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
3863 exit_qualification);
3866 * Must happen outside of sync_vmcs12() as it will
3867 * also be used to capture vmcs12 cache as part of
3868 * capturing nVMX state for snapshot (migration).
3870 * Otherwise, this flush will dirty guest memory at a
3871 * point it is already assumed by user-space to be
3874 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
3877 * The only expected VM-instruction error is "VM entry with
3878 * invalid control field(s)." Anything else indicates a
3879 * problem with L0. And we should never get here with a
3880 * VMFail of any type if early consistency checks are enabled.
3882 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
3883 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3884 WARN_ON_ONCE(nested_early_check);
3887 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3889 /* Update any VMCS fields that might have changed while L2 ran */
3890 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3891 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3892 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
3894 if (kvm_has_tsc_control)
3895 decache_tsc_multiplier(vmx);
3897 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
3898 vmx->nested.change_vmcs01_virtual_apic_mode = false;
3899 vmx_set_virtual_apic_mode(vcpu);
3900 } else if (!nested_cpu_has_ept(vmcs12) &&
3901 nested_cpu_has2(vmcs12,
3902 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3903 vmx_flush_tlb(vcpu, true);
3906 /* Unpin physical memory we referred to in vmcs02 */
3907 if (vmx->nested.apic_access_page) {
3908 kvm_release_page_dirty(vmx->nested.apic_access_page);
3909 vmx->nested.apic_access_page = NULL;
3911 if (vmx->nested.virtual_apic_page) {
3912 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
3913 vmx->nested.virtual_apic_page = NULL;
3915 if (vmx->nested.pi_desc_page) {
3916 kunmap(vmx->nested.pi_desc_page);
3917 kvm_release_page_dirty(vmx->nested.pi_desc_page);
3918 vmx->nested.pi_desc_page = NULL;
3919 vmx->nested.pi_desc = NULL;
3923 * We are now running in L2; the mmu_notifier will force the page's hpa to be
3924 * reloaded for the L2 vmcs. We need to reload it for L1 before entering L1.
3926 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
3928 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
3929 vmx->nested.need_vmcs12_sync = true;
3931 /* in case we halted in L2 */
3932 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3934 if (likely(!vmx->fail)) {
3936 * TODO: SDM says that with acknowledge interrupt on
3937 * exit, bit 31 of the VM-exit interrupt information
3938 * (valid interrupt) is always set to 1 on
3939 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
3940 * need kvm_cpu_has_interrupt(). See the commit
3941 * message for details.
3943 if (nested_exit_intr_ack_set(vcpu) &&
3944 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
3945 kvm_cpu_has_interrupt(vcpu)) {
3946 int irq = kvm_cpu_get_interrupt(vcpu);
3948 vmcs12->vm_exit_intr_info = irq |
3949 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
3952 if (exit_reason != -1)
3953 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
3954 vmcs12->exit_qualification,
3955 vmcs12->idt_vectoring_info_field,
3956 vmcs12->vm_exit_intr_info,
3957 vmcs12->vm_exit_intr_error_code,
3960 load_vmcs12_host_state(vcpu, vmcs12);
3966 * After an early L2 VM-entry failure, we're now back
3967 * in L1 which thinks it just finished a VMLAUNCH or
3968 * VMRESUME instruction, so we need to set the failure
3969 * flag and the VM-instruction error field of the VMCS
3970 * accordingly, and skip the emulated instruction.
3972 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3975 * Restore L1's host state to KVM's software model. We're here
3976 * because a consistency check was caught by hardware, which
3977 * means some amount of guest state has been propagated to KVM's
3978 * model and needs to be unwound to the host's state.
3980 nested_vmx_restore_host_state(vcpu);
3986 * Decode the memory-address operand of a vmx instruction, as recorded on an
3987 * exit caused by such an instruction (run by a guest hypervisor).
3988 * On success, returns 0. When the operand is invalid, returns 1 and throws
3989 * #UD, #GP(0) or #SS(0) as appropriate.
3991 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
3992 u32 vmx_instruction_info, bool wr, gva_t *ret)
3996 struct kvm_segment s;
3999 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4000 * Execution", on an exit, vmx_instruction_info holds most of the
4001 * addressing components of the operand. Only the displacement part
4002 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4003 * For how an actual address is calculated from all these components,
4004 * refer to Vol. 1, "Operand Addressing".
4006 int scaling = vmx_instruction_info & 3;
4007 int addr_size = (vmx_instruction_info >> 7) & 7;
4008 bool is_reg = vmx_instruction_info & (1u << 10);
4009 int seg_reg = (vmx_instruction_info >> 15) & 7;
4010 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4011 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4012 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4013 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
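/*
 * For reference, the bit layout of vmx_instruction_info decoded above
 * (VM-exit instruction-information field, SDM Vol. 3):
 *   [1:0]   scaling (the index register is shifted left by this amount)
 *   [9:7]   address size, as used below: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit
 *   [10]    Mem/Reg (1 = register operand, which is invalid here)
 *   [17:15] segment register
 *   [21:18] index register, [22] index register invalid
 *   [26:23] base register,  [27] base register invalid
 */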
4016 kvm_queue_exception(vcpu, UD_VECTOR);
4020 /* Addr = segment_base + offset */
4021 /* offset = base + [index * scale] + displacement */
4022 off = exit_qualification; /* holds the displacement */
4024 off = (gva_t)sign_extend64(off, 31);
4025 else if (addr_size == 0)
4026 off = (gva_t)sign_extend64(off, 15);
4028 off += kvm_register_read(vcpu, base_reg);
4030 off += kvm_register_read(vcpu, index_reg) << scaling;
4031 vmx_get_segment(vcpu, &s, seg_reg);
4034 * The effective address, i.e. @off, of a memory operand is truncated
4035 * based on the address size of the instruction. Note that this is
4036 * the *effective address*, i.e. the address prior to accounting for
4037 * the segment's base.
4039 if (addr_size == 1) /* 32 bit */
4040 off &= 0xffffffff;
4041 else if (addr_size == 0) /* 16 bit */
4042 off &= 0xffff;
4044 /* Checks for #GP/#SS exceptions. */
4046 if (is_long_mode(vcpu)) {
4048 * The virtual/linear address is never truncated in 64-bit
4049 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4050 * address when using FS/GS with a non-zero base.
4052 *ret = s.base + off;
4054 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4055 * non-canonical form. This is the only check on the memory
4056 * destination for long mode!
4058 exn = is_noncanonical_address(*ret, vcpu);
4061 * When not in long mode, the virtual/linear address is
4062 * unconditionally truncated to 32 bits regardless of the
4065 *ret = (s.base + off) & 0xffffffff;
4067 /* Protected mode: apply checks for segment validity in the
4068 * following order:
4069 * - segment type check (#GP(0) may be thrown)
4070 * - usability check (#GP(0)/#SS(0))
4071 * - limit check (#GP(0)/#SS(0))
4074 /* #GP(0) if the destination operand is located in a
4075 * read-only data segment or any code segment.
4077 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4079 /* #GP(0) if the source operand is located in an
4080 * execute-only code segment
4082 exn = ((s.type & 0xa) == 8);
4084 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4087 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4089 exn = (s.unusable != 0);
4092 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4093 * outside the segment limit. All CPUs that support VMX ignore
4094 * limit checks for flat segments, i.e. segments with base==0,
4095 * limit==0xffffffff and of type expand-up data or code.
4097 if (!(s.base == 0 && s.limit == 0xffffffff &&
4098 ((s.type & 8) || !(s.type & 4))))
4099 exn = exn || (off + sizeof(u64) > s.limit);
4102 kvm_queue_exception_e(vcpu,
4103 seg_reg == VCPU_SREG_SS ?
4104 SS_VECTOR : GP_VECTOR,
4112 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4115 struct x86_exception e;
4117 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4118 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
4121 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4122 kvm_inject_page_fault(vcpu, &e);
4130 * Allocate a shadow VMCS and associate it with the currently loaded
4131 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4132 * VMCS is also VMCLEARed, so that it is ready for use.
4134 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4136 struct vcpu_vmx *vmx = to_vmx(vcpu);
4137 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4140 * We should allocate a shadow vmcs for vmcs01 only when L1
4141 * executes VMXON and free it when L1 executes VMXOFF.
4142 * As it is invalid to execute VMXON twice, we shouldn't reach
4143 * here when vmcs01 already have an allocated shadow vmcs.
4145 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4147 if (!loaded_vmcs->shadow_vmcs) {
4148 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4149 if (loaded_vmcs->shadow_vmcs)
4150 vmcs_clear(loaded_vmcs->shadow_vmcs);
4152 return loaded_vmcs->shadow_vmcs;
4155 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4157 struct vcpu_vmx *vmx = to_vmx(vcpu);
4160 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4164 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4165 if (!vmx->nested.cached_vmcs12)
4166 goto out_cached_vmcs12;
4168 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4169 if (!vmx->nested.cached_shadow_vmcs12)
4170 goto out_cached_shadow_vmcs12;
4172 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4173 goto out_shadow_vmcs;
4175 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4176 HRTIMER_MODE_REL_PINNED);
4177 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4179 vmx->nested.vpid02 = allocate_vpid();
4181 vmx->nested.vmcs02_initialized = false;
4182 vmx->nested.vmxon = true;
4184 if (pt_mode == PT_MODE_HOST_GUEST) {
4185 vmx->pt_desc.guest.ctl = 0;
4186 pt_update_intercept_for_msr(vmx);
4192 kfree(vmx->nested.cached_shadow_vmcs12);
4194 out_cached_shadow_vmcs12:
4195 kfree(vmx->nested.cached_vmcs12);
4198 free_loaded_vmcs(&vmx->nested.vmcs02);
4205 * Emulate the VMXON instruction.
4206 * Currently, we just remember that VMX is active, and do not save or even
4207 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4208 * do not currently need to store anything in that guest-allocated memory
4209 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4210 * argument is different from the VMXON pointer (which the spec says they do).
4212 static int handle_vmon(struct kvm_vcpu *vcpu)
4217 struct vcpu_vmx *vmx = to_vmx(vcpu);
4218 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4219 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4222 * The Intel VMX Instruction Reference lists a bunch of bits that are
4223 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4224 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4225 * Otherwise, we should fail with #UD. But most faulting conditions
4226 * have already been checked by hardware, prior to the VM-exit for
4227 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4228 * that bit set to 1 in non-root mode.
4230 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4231 kvm_queue_exception(vcpu, UD_VECTOR);
4235 /* CPL=0 must be checked manually. */
4236 if (vmx_get_cpl(vcpu)) {
4237 kvm_inject_gp(vcpu, 0);
4241 if (vmx->nested.vmxon)
4242 return nested_vmx_failValid(vcpu,
4243 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4245 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4246 != VMXON_NEEDED_FEATURES) {
4247 kvm_inject_gp(vcpu, 0);
4251 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4256 * The first 4 bytes of VMXON region contain the supported
4257 * VMCS revision identifier
4259 * Note - IA32_VMX_BASIC[48], which would replace the physical address
4260 * width limit with 32 bits, will never be 1 for the nested case.
4262 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4263 return nested_vmx_failInvalid(vcpu);
4265 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4266 if (is_error_page(page))
4267 return nested_vmx_failInvalid(vcpu);
4269 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
4271 kvm_release_page_clean(page);
4272 return nested_vmx_failInvalid(vcpu);
4275 kvm_release_page_clean(page);
4277 vmx->nested.vmxon_ptr = vmptr;
4278 ret = enter_vmx_operation(vcpu);
4282 return nested_vmx_succeed(vcpu);
4285 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4287 struct vcpu_vmx *vmx = to_vmx(vcpu);
4289 if (vmx->nested.current_vmptr == -1ull)
4292 if (enable_shadow_vmcs) {
4293 /* copy to memory all shadowed fields in case
4294 they were modified */
4295 copy_shadow_to_vmcs12(vmx);
4296 vmx->nested.need_vmcs12_sync = false;
4297 vmx_disable_shadow_vmcs(vmx);
4299 vmx->nested.posted_intr_nv = -1;
4301 /* Flush VMCS12 to guest memory */
4302 kvm_vcpu_write_guest_page(vcpu,
4303 vmx->nested.current_vmptr >> PAGE_SHIFT,
4304 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4306 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4308 vmx->nested.current_vmptr = -1ull;
4311 /* Emulate the VMXOFF instruction */
4312 static int handle_vmoff(struct kvm_vcpu *vcpu)
4314 if (!nested_vmx_check_permission(vcpu))
4317 return nested_vmx_succeed(vcpu);
4320 /* Emulate the VMCLEAR instruction */
4321 static int handle_vmclear(struct kvm_vcpu *vcpu)
4323 struct vcpu_vmx *vmx = to_vmx(vcpu);
4327 if (!nested_vmx_check_permission(vcpu))
4330 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4333 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4334 return nested_vmx_failValid(vcpu,
4335 VMXERR_VMCLEAR_INVALID_ADDRESS);
4337 if (vmptr == vmx->nested.vmxon_ptr)
4338 return nested_vmx_failValid(vcpu,
4339 VMXERR_VMCLEAR_VMXON_POINTER);
4341 if (vmx->nested.hv_evmcs_page) {
4342 if (vmptr == vmx->nested.hv_evmcs_vmptr)
4343 nested_release_evmcs(vcpu);
4345 if (vmptr == vmx->nested.current_vmptr)
4346 nested_release_vmcs12(vcpu);
4348 kvm_vcpu_write_guest(vcpu,
4349 vmptr + offsetof(struct vmcs12,
4350 launch_state),
4351 &zero, sizeof(zero));
4354 return nested_vmx_succeed(vcpu);
4357 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4359 /* Emulate the VMLAUNCH instruction */
4360 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4362 return nested_vmx_run(vcpu, true);
4365 /* Emulate the VMRESUME instruction */
4366 static int handle_vmresume(struct kvm_vcpu *vcpu)
4369 return nested_vmx_run(vcpu, false);
4372 static int handle_vmread(struct kvm_vcpu *vcpu)
4374 unsigned long field;
4376 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4377 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4379 struct vmcs12 *vmcs12;
4381 if (!nested_vmx_check_permission(vcpu))
4384 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4385 return nested_vmx_failInvalid(vcpu);
4387 if (!is_guest_mode(vcpu))
4388 vmcs12 = get_vmcs12(vcpu);
4391 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4392 * to shadowed-field sets the ALU flags for VMfailInvalid.
4394 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4395 return nested_vmx_failInvalid(vcpu);
4396 vmcs12 = get_shadow_vmcs12(vcpu);
4399 /* Decode instruction info and find the field to read */
4400 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4401 /* Read the field, zero-extended to a u64 field_value */
4402 if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
4403 return nested_vmx_failValid(vcpu,
4404 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4407 * Now copy part of this value to register or memory, as requested.
4408 * Note that the number of bits actually copied is 32 or 64 depending
4409 * on the guest's mode (32 or 64 bit), not on the given field's length.
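/*
 * E.g. a VMREAD of a 16-bit field such as GUEST_ES_SELECTOR still stores a
 * full 8 bytes (zero-extended) to memory when L1 is in long mode, and 4
 * bytes otherwise, per the is_long_mode() check below.
 */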
4411 if (vmx_instruction_info & (1u << 10)) {
4412 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4415 if (get_vmx_mem_address(vcpu, exit_qualification,
4416 vmx_instruction_info, true, &gva))
4418 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4419 kvm_write_guest_virt_system(vcpu, gva, &field_value,
4420 (is_long_mode(vcpu) ? 8 : 4), NULL);
4423 return nested_vmx_succeed(vcpu);
4427 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4429 unsigned long field;
4431 struct vcpu_vmx *vmx = to_vmx(vcpu);
4432 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4433 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4435 /* The value to write might be 32 or 64 bits, depending on L1's long
4436 * mode, and eventually we need to write that into a field of several
4437 * possible lengths. The code below first zero-extends the value to 64
4438 * bit (field_value), and then copies only the appropriate number of
4439 * bits into the vmcs12 field.
4441 u64 field_value = 0;
4442 struct x86_exception e;
4443 struct vmcs12 *vmcs12;
4445 if (!nested_vmx_check_permission(vcpu))
4448 if (vmx->nested.current_vmptr == -1ull)
4449 return nested_vmx_failInvalid(vcpu);
4451 if (vmx_instruction_info & (1u << 10))
4452 field_value = kvm_register_readl(vcpu,
4453 (((vmx_instruction_info) >> 3) & 0xf));
4455 if (get_vmx_mem_address(vcpu, exit_qualification,
4456 vmx_instruction_info, false, &gva))
4458 if (kvm_read_guest_virt(vcpu, gva, &field_value,
4459 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
4460 kvm_inject_page_fault(vcpu, &e);
4466 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4468 * If the vCPU supports "VMWRITE to any supported field in the
4469 * VMCS," then the "read-only" fields are actually read/write.
4471 if (vmcs_field_readonly(field) &&
4472 !nested_cpu_has_vmwrite_any_field(vcpu))
4473 return nested_vmx_failValid(vcpu,
4474 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4476 if (!is_guest_mode(vcpu))
4477 vmcs12 = get_vmcs12(vcpu);
4480 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
4481 * to shadowed-field sets the ALU flags for VMfailInvalid.
4483 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4484 return nested_vmx_failInvalid(vcpu);
4485 vmcs12 = get_shadow_vmcs12(vcpu);
4488 if (vmcs12_write_any(vmcs12, field, field_value) < 0)
4489 return nested_vmx_failValid(vcpu,
4490 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4493 * Do not track vmcs12 dirty-state if in guest-mode
4494 * as we actually dirty shadow vmcs12 instead of vmcs12.
4496 if (!is_guest_mode(vcpu)) {
4498 #define SHADOW_FIELD_RW(x) case x:
4499 #include "vmcs_shadow_fields.h"
4501 * The fields that can be updated by L1 without a vmexit are
4502 * always updated in the vmcs02, the others go down the slow
4503 * path of prepare_vmcs02.
4507 vmx->nested.dirty_vmcs12 = true;
4512 return nested_vmx_succeed(vcpu);
4515 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4517 vmx->nested.current_vmptr = vmptr;
4518 if (enable_shadow_vmcs) {
4519 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4520 SECONDARY_EXEC_SHADOW_VMCS);
4521 vmcs_write64(VMCS_LINK_POINTER,
4522 __pa(vmx->vmcs01.shadow_vmcs));
4523 vmx->nested.need_vmcs12_sync = true;
4525 vmx->nested.dirty_vmcs12 = true;
4528 /* Emulate the VMPTRLD instruction */
4529 static int handle_vmptrld(struct kvm_vcpu *vcpu)
4531 struct vcpu_vmx *vmx = to_vmx(vcpu);
4534 if (!nested_vmx_check_permission(vcpu))
4537 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4540 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4541 return nested_vmx_failValid(vcpu,
4542 VMXERR_VMPTRLD_INVALID_ADDRESS);
4544 if (vmptr == vmx->nested.vmxon_ptr)
4545 return nested_vmx_failValid(vcpu,
4546 VMXERR_VMPTRLD_VMXON_POINTER);
4548 /* Forbid normal VMPTRLD if Enlightened version was used */
4549 if (vmx->nested.hv_evmcs)
4552 if (vmx->nested.current_vmptr != vmptr) {
4553 struct vmcs12 *new_vmcs12;
4556 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4557 if (is_error_page(page)) {
4559 * Reads from an unbacked page return all 1s,
4560 * which means that the 32 bits located at the
4561 * given physical address won't match the required
4562 * VMCS12_REVISION identifier.
4564 return nested_vmx_failValid(vcpu,
4565 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4567 new_vmcs12 = kmap(page);
4568 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4569 (new_vmcs12->hdr.shadow_vmcs &&
4570 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4572 kvm_release_page_clean(page);
4573 return nested_vmx_failValid(vcpu,
4574 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4577 nested_release_vmcs12(vcpu);
4580 * Load VMCS12 from guest memory since it is not already
4581 * loaded.
4583 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4585 kvm_release_page_clean(page);
4587 set_current_vmptr(vmx, vmptr);
4590 return nested_vmx_succeed(vcpu);
4593 /* Emulate the VMPTRST instruction */
4594 static int handle_vmptrst(struct kvm_vcpu *vcpu)
4596 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4597 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4598 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4599 struct x86_exception e;
4602 if (!nested_vmx_check_permission(vcpu))
4605 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4608 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
4610 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4611 if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr,
4612 sizeof(gpa_t), &e)) {
4613 kvm_inject_page_fault(vcpu, &e);
4616 return nested_vmx_succeed(vcpu);
4619 /* Emulate the INVEPT instruction */
4620 static int handle_invept(struct kvm_vcpu *vcpu)
4622 struct vcpu_vmx *vmx = to_vmx(vcpu);
4623 u32 vmx_instruction_info, types;
4626 struct x86_exception e;
4631 if (!(vmx->nested.msrs.secondary_ctls_high &
4632 SECONDARY_EXEC_ENABLE_EPT) ||
4633 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4634 kvm_queue_exception(vcpu, UD_VECTOR);
4638 if (!nested_vmx_check_permission(vcpu))
4641 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4642 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4644 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4646 if (type >= 32 || !(types & (1 << type)))
4647 return nested_vmx_failValid(vcpu,
4648 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4650 /* According to the Intel VMX instruction reference, the memory
4651 * operand is read even if it isn't needed (e.g., for type==global)
4653 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4654 vmx_instruction_info, false, &gva))
4656 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4657 kvm_inject_page_fault(vcpu, &e);
4662 case VMX_EPT_EXTENT_GLOBAL:
4664 * TODO: track mappings and invalidate
4665 * single context requests appropriately
4667 case VMX_EPT_EXTENT_CONTEXT:
4668 kvm_mmu_sync_roots(vcpu);
4669 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4676 return nested_vmx_succeed(vcpu);
4679 static int handle_invvpid(struct kvm_vcpu *vcpu)
4681 struct vcpu_vmx *vmx = to_vmx(vcpu);
4682 u32 vmx_instruction_info;
4683 unsigned long type, types;
4685 struct x86_exception e;
4692 if (!(vmx->nested.msrs.secondary_ctls_high &
4693 SECONDARY_EXEC_ENABLE_VPID) ||
4694 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4695 kvm_queue_exception(vcpu, UD_VECTOR);
4699 if (!nested_vmx_check_permission(vcpu))
4702 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4703 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4705 types = (vmx->nested.msrs.vpid_caps &
4706 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4708 if (type >= 32 || !(types & (1 << type)))
4709 return nested_vmx_failValid(vcpu,
4710 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4712 /* According to the Intel VMX instruction reference, the memory
4713 * operand is read even if it isn't needed (e.g., for type==global)
4715 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4716 vmx_instruction_info, false, &gva))
4718 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4719 kvm_inject_page_fault(vcpu, &e);
4722 if (operand.vpid >> 16)
4723 return nested_vmx_failValid(vcpu,
4724 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4726 vpid02 = nested_get_vpid02(vcpu);
4728 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4729 if (!operand.vpid ||
4730 is_noncanonical_address(operand.gla, vcpu))
4731 return nested_vmx_failValid(vcpu,
4732 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4733 if (cpu_has_vmx_invvpid_individual_addr()) {
4734 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4735 vpid02, operand.gla);
4737 __vmx_flush_tlb(vcpu, vpid02, false);
4739 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4740 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4742 return nested_vmx_failValid(vcpu,
4743 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4744 __vmx_flush_tlb(vcpu, vpid02, false);
4746 case VMX_VPID_EXTENT_ALL_CONTEXT:
4747 __vmx_flush_tlb(vcpu, vpid02, false);
4751 return kvm_skip_emulated_instruction(vcpu);
4754 return nested_vmx_succeed(vcpu);
4757 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4758 struct vmcs12 *vmcs12)
4760 u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
4762 bool accessed_dirty;
4763 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
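/*
 * The EPTP-switching VM function takes the EPTP-list index from ECX (read
 * above) into the list page referenced by vmcs12->eptp_list_address; each
 * of the up to VMFUNC_EPTP_ENTRIES entries is an 8-byte EPTP value, hence
 * the "index * 8" read below.
 */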
4765 if (!nested_cpu_has_eptp_switching(vmcs12) ||
4766 !nested_cpu_has_ept(vmcs12))
4769 if (index >= VMFUNC_EPTP_ENTRIES)
4773 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4774 &address, index * 8, 8))
4777 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
4780 * If the (L2) guest does a vmfunc to the currently
4781 * active ept pointer, we don't have to do anything else
4783 if (vmcs12->ept_pointer != address) {
4784 if (!valid_ept_address(vcpu, address))
4787 kvm_mmu_unload(vcpu);
4788 mmu->ept_ad = accessed_dirty;
4789 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
4790 vmcs12->ept_pointer = address;
4792 * TODO: Check what's the correct approach in case
4793 * mmu reload fails. Currently, we just let the next
4794 * reload potentially fail
4796 kvm_mmu_reload(vcpu);
4802 static int handle_vmfunc(struct kvm_vcpu *vcpu)
4804 struct vcpu_vmx *vmx = to_vmx(vcpu);
4805 struct vmcs12 *vmcs12;
4806 u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
4809 * VMFUNC is only supported for nested guests, but we always enable the
4810 * secondary control for simplicity; for non-nested mode, fake that we
4811 * didn't by injecting #UD.
4813 if (!is_guest_mode(vcpu)) {
4814 kvm_queue_exception(vcpu, UD_VECTOR);
4818 vmcs12 = get_vmcs12(vcpu);
4819 if ((vmcs12->vm_function_control & (1 << function)) == 0)
4824 if (nested_vmx_eptp_switching(vcpu, vmcs12))
4830 return kvm_skip_emulated_instruction(vcpu);
4833 nested_vmx_vmexit(vcpu, vmx->exit_reason,
4834 vmcs_read32(VM_EXIT_INTR_INFO),
4835 vmcs_readl(EXIT_QUALIFICATION));
4840 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
4841 struct vmcs12 *vmcs12)
4843 unsigned long exit_qualification;
4844 gpa_t bitmap, last_bitmap;
4849 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
4850 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
4852 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4854 port = exit_qualification >> 16;
4855 size = (exit_qualification & 7) + 1;
4857 last_bitmap = (gpa_t)-1;
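/*
 * The two 4-KiB I/O bitmaps hold one bit per port: io_bitmap_a covers ports
 * 0x0000-0x7fff and io_bitmap_b covers 0x8000-0xffff. A multi-byte access is
 * reflected to L1 if any accessed port's bit is set, so the "size" ports are
 * checked one at a time below.
 */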
4862 bitmap = vmcs12->io_bitmap_a;
4863 else if (port < 0x10000)
4864 bitmap = vmcs12->io_bitmap_b;
4867 bitmap += (port & 0x7fff) / 8;
4869 if (last_bitmap != bitmap)
4870 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
4872 if (b & (1 << (port & 7)))
4877 last_bitmap = bitmap;
4884 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
4885 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
4886 * disinterest in the current event (read or write a specific MSR) by using an
4887 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
4889 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
4890 struct vmcs12 *vmcs12, u32 exit_reason)
4892 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
4895 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
4899 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
4900 * for the four combinations of read/write and low/high MSR numbers.
4901 * First we need to figure out which of the four to use:
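/*
 * For reference, the offsets within the 4-KiB bitmap page are: read-low
 * (MSRs 0x00000000-0x00001fff) at byte 0, read-high (0xc0000000-0xc0001fff)
 * at 1024, write-low at 2048 and write-high at 3072. E.g. a write to
 * MSR_EFER (0xc0000080) is decided by bit (0x80 & 7) of byte
 * 2048 + 1024 + 0x80 / 8 within the page.
 */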
4903 bitmap = vmcs12->msr_bitmap;
4904 if (exit_reason == EXIT_REASON_MSR_WRITE)
4906 if (msr_index >= 0xc0000000) {
4907 msr_index -= 0xc0000000;
4911 /* Then read the msr_index'th bit from this bitmap: */
4912 if (msr_index < 1024*8) {
4914 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
4916 return 1 & (b >> (msr_index & 7));
4918 return true; /* let L1 handle the wrong parameter */
4922 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
4923 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
4924 * intercept (via guest_host_mask etc.) the current event.
4926 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
4927 struct vmcs12 *vmcs12)
4929 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4930 int cr = exit_qualification & 15;
4934 switch ((exit_qualification >> 4) & 3) {
4935 case 0: /* mov to cr */
4936 reg = (exit_qualification >> 8) & 15;
4937 val = kvm_register_readl(vcpu, reg);
4940 if (vmcs12->cr0_guest_host_mask &
4941 (val ^ vmcs12->cr0_read_shadow))
4945 if ((vmcs12->cr3_target_count >= 1 &&
4946 vmcs12->cr3_target_value0 == val) ||
4947 (vmcs12->cr3_target_count >= 2 &&
4948 vmcs12->cr3_target_value1 == val) ||
4949 (vmcs12->cr3_target_count >= 3 &&
4950 vmcs12->cr3_target_value2 == val) ||
4951 (vmcs12->cr3_target_count >= 4 &&
4952 vmcs12->cr3_target_value3 == val))
4954 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
4958 if (vmcs12->cr4_guest_host_mask &
4959 (vmcs12->cr4_read_shadow ^ val))
4963 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
4969 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
4970 (vmcs12->cr0_read_shadow & X86_CR0_TS))
4973 case 1: /* mov from cr */
4976 if (vmcs12->cpu_based_vm_exec_control &
4977 CPU_BASED_CR3_STORE_EXITING)
4981 if (vmcs12->cpu_based_vm_exec_control &
4982 CPU_BASED_CR8_STORE_EXITING)
4989 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
4990 * cr0. Other attempted changes are ignored, with no exit.
4992 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4993 if (vmcs12->cr0_guest_host_mask & 0xe &
4994 (val ^ vmcs12->cr0_read_shadow))
4996 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
4997 !(vmcs12->cr0_read_shadow & 0x1) &&
5005 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5006 struct vmcs12 *vmcs12, gpa_t bitmap)
5008 u32 vmx_instruction_info;
5009 unsigned long field;
5012 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5015 /* Decode instruction info and find the field to access */
5016 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5017 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5019 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5023 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5026 return 1 & (b >> (field & 7));
5030 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
5031 * should handle it ourselves in L0 (and then continue L2). Only call this
5032 * when in is_guest_mode (L2).
5034 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5036 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5037 struct vcpu_vmx *vmx = to_vmx(vcpu);
5038 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5040 if (vmx->nested.nested_run_pending)
5043 if (unlikely(vmx->fail)) {
5044 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
5045 vmcs_read32(VM_INSTRUCTION_ERROR));
5050 * The host physical addresses of some pages of guest memory
5051 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5052 * Page). The CPU may write to these pages via their host
5053 * physical address while L2 is running, bypassing any
5054 * address-translation-based dirty tracking (e.g. EPT write
5057 * Mark them dirty on every exit from L2 to prevent them from
5058 * getting out of sync with dirty tracking.
5060 nested_mark_vmcs12_pages_dirty(vcpu);
5062 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5063 vmcs_readl(EXIT_QUALIFICATION),
5064 vmx->idt_vectoring_info,
5066 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5069 switch (exit_reason) {
5070 case EXIT_REASON_EXCEPTION_NMI:
5071 if (is_nmi(intr_info))
5073 else if (is_page_fault(intr_info))
5074 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5075 else if (is_debug(intr_info) &&
5077 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5079 else if (is_breakpoint(intr_info) &&
5080 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5082 return vmcs12->exception_bitmap &
5083 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5084 case EXIT_REASON_EXTERNAL_INTERRUPT:
5086 case EXIT_REASON_TRIPLE_FAULT:
5088 case EXIT_REASON_PENDING_INTERRUPT:
5089 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5090 case EXIT_REASON_NMI_WINDOW:
5091 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5092 case EXIT_REASON_TASK_SWITCH:
5094 case EXIT_REASON_CPUID:
5096 case EXIT_REASON_HLT:
5097 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5098 case EXIT_REASON_INVD:
5100 case EXIT_REASON_INVLPG:
5101 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5102 case EXIT_REASON_RDPMC:
5103 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5104 case EXIT_REASON_RDRAND:
5105 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5106 case EXIT_REASON_RDSEED:
5107 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5108 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5109 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5110 case EXIT_REASON_VMREAD:
5111 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5112 vmcs12->vmread_bitmap);
5113 case EXIT_REASON_VMWRITE:
5114 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5115 vmcs12->vmwrite_bitmap);
5116 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5117 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5118 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5119 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5120 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5122 * VMX instructions trap unconditionally. This allows L1 to
5123 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5126 case EXIT_REASON_CR_ACCESS:
5127 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5128 case EXIT_REASON_DR_ACCESS:
5129 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5130 case EXIT_REASON_IO_INSTRUCTION:
5131 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5132 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5133 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5134 case EXIT_REASON_MSR_READ:
5135 case EXIT_REASON_MSR_WRITE:
5136 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5137 case EXIT_REASON_INVALID_STATE:
5139 case EXIT_REASON_MWAIT_INSTRUCTION:
5140 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5141 case EXIT_REASON_MONITOR_TRAP_FLAG:
5142 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5143 case EXIT_REASON_MONITOR_INSTRUCTION:
5144 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5145 case EXIT_REASON_PAUSE_INSTRUCTION:
5146 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5147 nested_cpu_has2(vmcs12,
5148 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5149 case EXIT_REASON_MCE_DURING_VMENTRY:
5151 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5152 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5153 case EXIT_REASON_APIC_ACCESS:
5154 case EXIT_REASON_APIC_WRITE:
5155 case EXIT_REASON_EOI_INDUCED:
5157 * The controls for "virtualize APIC accesses," "APIC-
5158 * register virtualization," and "virtual-interrupt
5159 * delivery" only come from vmcs12.
5162 case EXIT_REASON_EPT_VIOLATION:
5164 * L0 always deals with the EPT violation. If nested EPT is
5165 * used, and the nested mmu code discovers that the address is
5166 * missing in the guest EPT table (EPT12), the EPT violation
5167 * will be injected with nested_ept_inject_page_fault()
5170 case EXIT_REASON_EPT_MISCONFIG:
5172 * L2 never directly uses L1's EPT, but rather L0's own EPT
5173 * table (shadow on EPT) or a merged EPT table that L0 built
5174 * (EPT on EPT). So any problems with the structure of the
5175 * table are L0's fault.
5178 case EXIT_REASON_INVPCID:
5180 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5181 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5182 case EXIT_REASON_WBINVD:
5183 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5184 case EXIT_REASON_XSETBV:
5186 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5188 * This should never happen, since it is not possible to
5189 * set XSS to a non-zero value---neither in L1 nor in L2.
5190 * If it were, XSS would have to be checked against
5191 * the XSS exit bitmap in vmcs12.
5193 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5194 case EXIT_REASON_PREEMPTION_TIMER:
5196 case EXIT_REASON_PML_FULL:
5197 /* We emulate PML support to L1. */
5199 case EXIT_REASON_VMFUNC:
5200 /* VM functions are emulated through L2->L0 vmexits. */
5202 case EXIT_REASON_ENCLS:
5203 /* SGX is never exposed to L1 */
5211 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5212 struct kvm_nested_state __user *user_kvm_nested_state,
5215 struct vcpu_vmx *vmx;
5216 struct vmcs12 *vmcs12;
5217 struct kvm_nested_state kvm_state = {
5220 .size = sizeof(kvm_state),
5221 .vmx.vmxon_pa = -1ull,
5222 .vmx.vmcs_pa = -1ull,
5226 return kvm_state.size + 2 * VMCS12_SIZE;
5229 vmcs12 = get_vmcs12(vcpu);
5231 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5232 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5234 if (nested_vmx_allowed(vcpu) &&
5235 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5236 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5237 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
5239 if (vmx_has_valid_vmcs12(vcpu)) {
5240 kvm_state.size += VMCS12_SIZE;
5242 if (is_guest_mode(vcpu) &&
5243 nested_cpu_has_shadow_vmcs(vmcs12) &&
5244 vmcs12->vmcs_link_pointer != -1ull)
5245 kvm_state.size += VMCS12_SIZE;
5248 if (vmx->nested.smm.vmxon)
5249 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5251 if (vmx->nested.smm.guest_mode)
5252 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5254 if (is_guest_mode(vcpu)) {
5255 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5257 if (vmx->nested.nested_run_pending)
5258 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5262 if (user_data_size < kvm_state.size)
5265 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5268 if (!vmx_has_valid_vmcs12(vcpu))
5272 * When running L2, the authoritative vmcs12 state is in the
5273 * vmcs02. When running L1, the authoritative vmcs12 state is
5274 * in the shadow or enlightened vmcs linked to vmcs01, unless
5275 * need_vmcs12_sync is set, in which case, the authoritative
5276 * vmcs12 state is in the vmcs12 already.
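/*
 * Concretely: in guest mode, sync from vmcs02 into the cached vmcs12; in L1
 * with an enlightened VMCS, copy from the eVMCS; in L1 with shadow VMCS,
 * copy from the shadow VMCS; and when need_vmcs12_sync is already set there
 * is nothing to copy.
 */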
5278 if (is_guest_mode(vcpu)) {
5279 sync_vmcs12(vcpu, vmcs12);
5280 } else if (!vmx->nested.need_vmcs12_sync) {
5281 if (vmx->nested.hv_evmcs)
5282 copy_enlightened_to_vmcs12(vmx);
5283 else if (enable_shadow_vmcs)
5284 copy_shadow_to_vmcs12(vmx);
5288 * Copy over the full allocated size of vmcs12 rather than just the size
5289 * of the struct.
5291 if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
5294 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5295 vmcs12->vmcs_link_pointer != -1ull) {
5296 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5297 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5302 return kvm_state.size;
5306 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5308 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5310 if (is_guest_mode(vcpu)) {
5311 to_vmx(vcpu)->nested.nested_run_pending = 0;
5312 nested_vmx_vmexit(vcpu, -1, 0, 0);
5317 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5318 struct kvm_nested_state __user *user_kvm_nested_state,
5319 struct kvm_nested_state *kvm_state)
5321 struct vcpu_vmx *vmx = to_vmx(vcpu);
5322 struct vmcs12 *vmcs12;
5326 if (kvm_state->format != 0)
5329 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
5330 nested_enable_evmcs(vcpu, NULL);
5332 if (!nested_vmx_allowed(vcpu))
5333 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
5335 if (kvm_state->vmx.vmxon_pa == -1ull) {
5336 if (kvm_state->vmx.smm.flags)
5339 if (kvm_state->vmx.vmcs_pa != -1ull)
5342 vmx_leave_nested(vcpu);
5346 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
5349 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5350 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5353 if (kvm_state->vmx.smm.flags &
5354 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5358 * SMM temporarily disables VMX, so we cannot be in guest mode,
5359 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5360 * must be zero.
5362 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
5365 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5366 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5369 vmx_leave_nested(vcpu);
5370 if (kvm_state->vmx.vmxon_pa == -1ull)
5373 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
5374 ret = enter_vmx_operation(vcpu);
5378 /* Empty 'VMXON' state is permitted */
5379 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
5382 if (kvm_state->vmx.vmcs_pa != -1ull) {
5383 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
5384 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
5387 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
5388 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5390 * Sync eVMCS upon entry as we may not have
5391 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5393 vmx->nested.need_vmcs12_sync = true;
5398 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5399 vmx->nested.smm.vmxon = true;
5400 vmx->nested.vmxon = false;
5402 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5403 vmx->nested.smm.guest_mode = true;
5406 vmcs12 = get_vmcs12(vcpu);
5407 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
5410 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5413 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5416 vmx->nested.nested_run_pending =
5417 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5419 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5420 vmcs12->vmcs_link_pointer != -1ull) {
5421 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5423 if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
5426 if (copy_from_user(shadow_vmcs12,
5427 user_kvm_nested_state->data + VMCS12_SIZE,
5431 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5432 !shadow_vmcs12->hdr.shadow_vmcs)
5436 if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
5437 nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
5440 vmx->nested.dirty_vmcs12 = true;
5441 ret = nested_vmx_enter_non_root_mode(vcpu, false);
5448 void nested_vmx_vcpu_setup(void)
5450 if (enable_shadow_vmcs) {
5452 * At vCPU creation, "VMWRITE to any supported field
5453 * in the VMCS" is supported, so use the more
5454 * permissive vmx_vmread_bitmap to specify both read
5455 * and write permissions for the shadow VMCS.
5457 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
5458 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
5463 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5464 * returned for the various VMX controls MSRs when nested VMX is enabled.
5465 * The same values should also be used to verify that vmcs12 control fields are
5466 * valid during nested entry from L1 to L2.
5467 * Each of these control msrs has a low and high 32-bit half: A low bit is on
5468 * if the corresponding bit in the (32-bit) control field *must* be on, and a
5469 * bit in the high half is on if the corresponding bit in the control field
5470 * may be on. See also vmx_control_verify().
5472 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5476 * Note that as a general rule, the high half of the MSRs (bits in
5477 * the control fields which may be 1) should be initialized by the
5478 * intersection of the underlying hardware's MSR (i.e., features which
5479 * can be supported) and the list of features we want to expose -
5480 * because they are known to be properly supported in our code.
5481 * Also, usually, the low half of the MSRs (bits which must be 1) can
5482 * be set to 0, meaning that L1 may turn off any of these bits. The
5483 * reason is that if one of these bits is necessary, it will appear
5484 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
5485 * fields of vmcs01 and vmcs12, will keep these bits on - and
5486 * nested_vmx_exit_reflected() will not pass related exits to L1.
5487 * These rules have exceptions below.
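/*
 * In other words, a control value val requested by L1 is acceptable roughly
 * when (val & low) == low and (val & ~high) == 0: every "must be 1" bit is
 * set and nothing outside the "may be 1" mask is requested.
 */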
5490 /* pin-based controls */
5491 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5492 msrs->pinbased_ctls_low,
5493 msrs->pinbased_ctls_high);
5494 msrs->pinbased_ctls_low |=
5495 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5496 msrs->pinbased_ctls_high &=
5497 PIN_BASED_EXT_INTR_MASK |
5498 PIN_BASED_NMI_EXITING |
5499 PIN_BASED_VIRTUAL_NMIS |
5500 (apicv ? PIN_BASED_POSTED_INTR : 0);
5501 msrs->pinbased_ctls_high |=
5502 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5503 PIN_BASED_VMX_PREEMPTION_TIMER;
5506 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5507 msrs->exit_ctls_low,
5508 msrs->exit_ctls_high);
5509 msrs->exit_ctls_low =
5510 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5512 msrs->exit_ctls_high &=
5513 #ifdef CONFIG_X86_64
5514 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5516 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5517 msrs->exit_ctls_high |=
5518 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5519 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5520 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5522 /* We support free control of debug control saving. */
5523 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5525 /* entry controls */
5526 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5527 msrs->entry_ctls_low,
5528 msrs->entry_ctls_high);
5529 msrs->entry_ctls_low =
5530 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5531 msrs->entry_ctls_high &=
5532 #ifdef CONFIG_X86_64
5533 VM_ENTRY_IA32E_MODE |
5535 VM_ENTRY_LOAD_IA32_PAT;
5536 msrs->entry_ctls_high |=
5537 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5539 /* We support free control of debug control loading. */
5540 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5542 /* cpu-based controls */
5543 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5544 msrs->procbased_ctls_low,
5545 msrs->procbased_ctls_high);
5546 msrs->procbased_ctls_low =
5547 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5548 msrs->procbased_ctls_high &=
5549 CPU_BASED_VIRTUAL_INTR_PENDING |
5550 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5551 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5552 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5553 CPU_BASED_CR3_STORE_EXITING |
5554 #ifdef CONFIG_X86_64
5555 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5557 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5558 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5559 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5560 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5561 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5563 * We can allow some features even when not supported by the
5564 * hardware. For example, L1 can specify an MSR bitmap - and we
5565 * can use it to avoid exits to L1 - even when L0 runs L2
5566 * without MSR bitmaps.
5568 msrs->procbased_ctls_high |=
5569 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5570 CPU_BASED_USE_MSR_BITMAPS;
5572 /* We support free control of CR3 access interception. */
5573 msrs->procbased_ctls_low &=
5574 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5577 * secondary cpu-based controls. Do not include those that
5578 * depend on CPUID bits, they are added later by vmx_cpuid_update.
5580 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5581 msrs->secondary_ctls_low,
5582 msrs->secondary_ctls_high);
5583 msrs->secondary_ctls_low = 0;
5584 msrs->secondary_ctls_high &=
5585 SECONDARY_EXEC_DESC |
5586 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5587 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5588 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5589 SECONDARY_EXEC_WBINVD_EXITING;
5592 * We can emulate "VMCS shadowing," even if the hardware
5593 * doesn't support it.
5595 msrs->secondary_ctls_high |=
5596 SECONDARY_EXEC_SHADOW_VMCS;
5599 /* nested EPT: emulate EPT also to L1 */
5600 msrs->secondary_ctls_high |=
5601 SECONDARY_EXEC_ENABLE_EPT;
5602 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5603 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5604 if (cpu_has_vmx_ept_execute_only())
5606 VMX_EPT_EXECUTE_ONLY_BIT;
5607 msrs->ept_caps &= ept_caps;
5608 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5609 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5610 VMX_EPT_1GB_PAGE_BIT;
5611 if (enable_ept_ad_bits) {
5612 msrs->secondary_ctls_high |=
5613 SECONDARY_EXEC_ENABLE_PML;
5614 msrs->ept_caps |= VMX_EPT_AD_BIT;
5618 if (cpu_has_vmx_vmfunc()) {
5619 msrs->secondary_ctls_high |=
5620 SECONDARY_EXEC_ENABLE_VMFUNC;
5622 * Advertise EPTP switching unconditionally
5623 * since we emulate it
5626 msrs->vmfunc_controls =
5627 VMX_VMFUNC_EPTP_SWITCHING;
5631 * Old versions of KVM use the single-context version without
5632 * checking for support, so declare that it is supported even
5633 * though it is treated as global context. The alternative is
5634 * not failing the single-context invvpid, and it is worse.
5637 msrs->secondary_ctls_high |=
5638 SECONDARY_EXEC_ENABLE_VPID;
5639 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5640 VMX_VPID_EXTENT_SUPPORTED_MASK;
5643 if (enable_unrestricted_guest)
5644 msrs->secondary_ctls_high |=
5645 SECONDARY_EXEC_UNRESTRICTED_GUEST;
5647 if (flexpriority_enabled)
5648 msrs->secondary_ctls_high |=
5649 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5651 /* miscellaneous data */
5652 rdmsr(MSR_IA32_VMX_MISC,
5655 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5657 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5658 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5659 VMX_MISC_ACTIVITY_HLT;
5660 msrs->misc_high = 0;
5663 * This MSR reports some information about VMX support. We
5664 * should return information about the VMX we emulate for the
5665 * guest, and the VMCS structure we give it - not about the
5666 * VMX support of the underlying hardware.
5670 VMX_BASIC_TRUE_CTLS |
5671 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
5672 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
5674 if (cpu_has_vmx_basic_inout())
5675 msrs->basic |= VMX_BASIC_INOUT;
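/*
 * For reference, the fields assembled above follow the SDM definition of
 * IA32_VMX_BASIC: bits 44:32 report the VMCS region size (VMCS12_SIZE here),
 * bits 53:50 the recommended memory type (write-back), bit 54 INS/OUTS
 * exit-information reporting, and bit 55 that the "true" control MSRs exist.
 */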
5678 * These MSRs specify bits which the guest must keep fixed on
5679 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
5680 * We picked the standard core2 setting.
5682 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
5683 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
5684 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
5685 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
5687 /* These MSRs specify bits which the guest must keep fixed off. */
5688 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
5689 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
5691 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
5692 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
5695 void nested_vmx_hardware_unsetup(void)
5699 if (enable_shadow_vmcs) {
5700 for (i = 0; i < VMX_BITMAP_NR; i++)
5701 free_page((unsigned long)vmx_bitmap[i]);
5705 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
5709 if (!cpu_has_vmx_shadow_vmcs())
5710 enable_shadow_vmcs = 0;
5711 if (enable_shadow_vmcs) {
5712 for (i = 0; i < VMX_BITMAP_NR; i++) {
5713 vmx_bitmap[i] = (unsigned long *)
5714 __get_free_page(GFP_KERNEL);
5715 if (!vmx_bitmap[i]) {
5716 nested_vmx_hardware_unsetup();
5721 init_vmcs_shadow_fields();
5724 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear,
5725 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
5726 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld,
5727 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst,
5728 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread,
5729 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume,
5730 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite,
5731 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff,
5732 exit_handlers[EXIT_REASON_VMON] = handle_vmon,
5733 exit_handlers[EXIT_REASON_INVEPT] = handle_invept,
5734 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid,
5735 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc,
5737 kvm_x86_ops->check_nested_events = vmx_check_nested_events;
5738 kvm_x86_ops->get_nested_state = vmx_get_nested_state;
5739 kvm_x86_ops->set_nested_state = vmx_set_nested_state;
5740 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages,
5741 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
5742 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;