linux.git: arch/x86/kvm/vmx/nested.c (blob 3290e332c25ffaca3dfc686a4fd81f17b11bd0ad)
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/frame.h>
4 #include <linux/percpu.h>
5
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
8
9 #include "cpuid.h"
10 #include "hyperv.h"
11 #include "mmu.h"
12 #include "nested.h"
13 #include "trace.h"
14 #include "x86.h"
15
16 static bool __read_mostly enable_shadow_vmcs = 1;
17 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
18
19 static bool __read_mostly nested_early_check = 0;
20 module_param(nested_early_check, bool, S_IRUGO);
21
22 /*
23  * Hyper-V requires all of these, so mark them as supported even though
24  * they are just treated the same as all-context.
25  */
26 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
27         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
28         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
29         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
30         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
31
32 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
33
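/*
 * Bitmaps used with shadow VMCS (a sketch of their meaning, inferred from the
 * code below): a set bit means the corresponding VMCS field is *not* shadowed,
 * so an L1 VMREAD/VMWRITE of that field traps to L0 for emulation.  Bits for
 * shadowed fields are cleared in init_vmcs_shadow_fields().
 */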
34 enum {
35         VMX_VMREAD_BITMAP,
36         VMX_VMWRITE_BITMAP,
37         VMX_BITMAP_NR
38 };
39 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
40
41 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
42 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
43
44 struct shadow_vmcs_field {
45         u16     encoding;
46         u16     offset;
47 };
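/*
 * The read-only and read-write shadow field tables below are generated by
 * including vmcs_shadow_fields.h twice with different definitions of the
 * SHADOW_FIELD_RO/SHADOW_FIELD_RW macros (an x-macro pattern).
 */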
48 static struct shadow_vmcs_field shadow_read_only_fields[] = {
49 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
50 #include "vmcs_shadow_fields.h"
51 };
52 static int max_shadow_read_only_fields =
53         ARRAY_SIZE(shadow_read_only_fields);
54
55 static struct shadow_vmcs_field shadow_read_write_fields[] = {
56 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
57 #include "vmcs_shadow_fields.h"
58 };
59 static int max_shadow_read_write_fields =
60         ARRAY_SIZE(shadow_read_write_fields);
61
62 static void init_vmcs_shadow_fields(void)
63 {
64         int i, j;
65
66         memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
67         memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
68
69         for (i = j = 0; i < max_shadow_read_only_fields; i++) {
70                 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
71                 u16 field = entry.encoding;
72
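                /*
                 * Each shadowed 64-bit field must be followed by its
                 * high-half encoding (field + 1) in the generated table;
                 * warn if the generator missed it.
                 */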
73                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
74                     (i + 1 == max_shadow_read_only_fields ||
75                      shadow_read_only_fields[i + 1].encoding != field + 1))
76                         pr_err("Missing field from shadow_read_only_field %x\n",
77                                field + 1);
78
79                 clear_bit(field, vmx_vmread_bitmap);
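                /*
                 * Odd encodings are the high 32 bits of a 64-bit field.  On
                 * 64-bit hosts the full field is accessed via the even
                 * encoding, so drop the entry; on 32-bit hosts keep it and
                 * point its offset at the high dword.
                 */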
80                 if (field & 1)
81 #ifdef CONFIG_X86_64
82                         continue;
83 #else
84                         entry.offset += sizeof(u32);
85 #endif
86                 shadow_read_only_fields[j++] = entry;
87         }
88         max_shadow_read_only_fields = j;
89
90         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
91                 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
92                 u16 field = entry.encoding;
93
94                 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
95                     (i + 1 == max_shadow_read_write_fields ||
96                      shadow_read_write_fields[i + 1].encoding != field + 1))
97                         pr_err("Missing field from shadow_read_write_field %x\n",
98                                field + 1);
99
100                 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
101                           field <= GUEST_TR_AR_BYTES,
102                           "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
103
104                 /*
105                  * PML and the preemption timer can be emulated, but the
106                  * processor cannot vmwrite to fields that don't exist
107                  * on bare metal.
108                  */
109                 switch (field) {
110                 case GUEST_PML_INDEX:
111                         if (!cpu_has_vmx_pml())
112                                 continue;
113                         break;
114                 case VMX_PREEMPTION_TIMER_VALUE:
115                         if (!cpu_has_vmx_preemption_timer())
116                                 continue;
117                         break;
118                 case GUEST_INTR_STATUS:
119                         if (!cpu_has_vmx_apicv())
120                                 continue;
121                         break;
122                 default:
123                         break;
124                 }
125
126                 clear_bit(field, vmx_vmwrite_bitmap);
127                 clear_bit(field, vmx_vmread_bitmap);
128                 if (field & 1)
129 #ifdef CONFIG_X86_64
130                         continue;
131 #else
132                         entry.offset += sizeof(u32);
133 #endif
134                 shadow_read_write_fields[j++] = entry;
135         }
136         max_shadow_read_write_fields = j;
137 }
138
139 /*
140  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
141  * set the success or error code of an emulated VMX instruction (as specified
142  * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
143  * instruction.
144  */
145 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
146 {
147         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
148                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
149                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
150         return kvm_skip_emulated_instruction(vcpu);
151 }
152
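/* VMfailInvalid: set CF and clear the other arithmetic flags. */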
153 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
154 {
155         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
156                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
157                             X86_EFLAGS_SF | X86_EFLAGS_OF))
158                         | X86_EFLAGS_CF);
159         return kvm_skip_emulated_instruction(vcpu);
160 }
161
162 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
163                                 u32 vm_instruction_error)
164 {
165         struct vcpu_vmx *vmx = to_vmx(vcpu);
166
167         /*
168          * failValid writes the error number to the current VMCS, which
169          * can't be done if there isn't a current VMCS.
170          */
171         if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
172                 return nested_vmx_failInvalid(vcpu);
173
174         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
175                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
176                             X86_EFLAGS_SF | X86_EFLAGS_OF))
177                         | X86_EFLAGS_ZF);
178         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
179         /*
180          * We don't need to force a shadow sync because
181          * VM_INSTRUCTION_ERROR is not shadowed
182          */
183         return kvm_skip_emulated_instruction(vcpu);
184 }
185
186 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
187 {
188         /* TODO: do not simply reset the guest here. */
189         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
190         pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
191 }
192
193 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
194 {
195         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
196         vmcs_write64(VMCS_LINK_POINTER, -1ull);
197 }
198
199 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
200 {
201         struct vcpu_vmx *vmx = to_vmx(vcpu);
202
203         if (!vmx->nested.hv_evmcs)
204                 return;
205
206         kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
207         vmx->nested.hv_evmcs_vmptr = -1ull;
208         vmx->nested.hv_evmcs = NULL;
209 }
210
211 /*
212  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
213  * just stops using VMX.
214  */
215 static void free_nested(struct kvm_vcpu *vcpu)
216 {
217         struct vcpu_vmx *vmx = to_vmx(vcpu);
218
219         if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
220                 return;
221
222         vmx->nested.vmxon = false;
223         vmx->nested.smm.vmxon = false;
224         free_vpid(vmx->nested.vpid02);
225         vmx->nested.posted_intr_nv = -1;
226         vmx->nested.current_vmptr = -1ull;
227         if (enable_shadow_vmcs) {
228                 vmx_disable_shadow_vmcs(vmx);
229                 vmcs_clear(vmx->vmcs01.shadow_vmcs);
230                 free_vmcs(vmx->vmcs01.shadow_vmcs);
231                 vmx->vmcs01.shadow_vmcs = NULL;
232         }
233         kfree(vmx->nested.cached_vmcs12);
234         kfree(vmx->nested.cached_shadow_vmcs12);
235         /* Unpin physical memory we referred to in the vmcs02 */
236         if (vmx->nested.apic_access_page) {
237                 kvm_release_page_dirty(vmx->nested.apic_access_page);
238                 vmx->nested.apic_access_page = NULL;
239         }
240         kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
241         kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
242         vmx->nested.pi_desc = NULL;
243
244         kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
245
246         nested_release_evmcs(vcpu);
247
248         free_loaded_vmcs(&vmx->nested.vmcs02);
249 }
250
251 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
252                                      struct loaded_vmcs *prev)
253 {
254         struct vmcs_host_state *dest, *src;
255
256         if (unlikely(!vmx->guest_state_loaded))
257                 return;
258
259         src = &prev->host_state;
260         dest = &vmx->loaded_vmcs->host_state;
261
262         vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
263         dest->ldt_sel = src->ldt_sel;
264 #ifdef CONFIG_X86_64
265         dest->ds_sel = src->ds_sel;
266         dest->es_sel = src->es_sel;
267 #endif
268 }
269
270 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
271 {
272         struct vcpu_vmx *vmx = to_vmx(vcpu);
273         struct loaded_vmcs *prev;
274         int cpu;
275
276         if (vmx->loaded_vmcs == vmcs)
277                 return;
278
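        /*
         * Disable preemption while switching the per-CPU loaded VMCS so the
         * vCPU cannot be migrated or rescheduled mid-switch.
         */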
279         cpu = get_cpu();
280         prev = vmx->loaded_vmcs;
281         vmx->loaded_vmcs = vmcs;
282         vmx_vcpu_load_vmcs(vcpu, cpu);
283         vmx_sync_vmcs_host_state(vmx, prev);
284         put_cpu();
285
286         vm_entry_controls_reset_shadow(vmx);
287         vm_exit_controls_reset_shadow(vmx);
288         vmx_segment_cache_clear(vmx);
289 }
290
291 /*
292  * Ensure that the current vmcs of the logical processor is the
293  * vmcs01 of the vcpu before calling free_nested().
294  */
295 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
296 {
297         vcpu_load(vcpu);
298         vmx_leave_nested(vcpu);
299         vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
300         free_nested(vcpu);
301         vcpu_put(vcpu);
302 }
303
304 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
305                 struct x86_exception *fault)
306 {
307         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
308         struct vcpu_vmx *vmx = to_vmx(vcpu);
309         u32 exit_reason;
310         unsigned long exit_qualification = vcpu->arch.exit_qualification;
311
312         if (vmx->nested.pml_full) {
313                 exit_reason = EXIT_REASON_PML_FULL;
314                 vmx->nested.pml_full = false;
315                 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
316         } else if (fault->error_code & PFERR_RSVD_MASK)
317                 exit_reason = EXIT_REASON_EPT_MISCONFIG;
318         else
319                 exit_reason = EXIT_REASON_EPT_VIOLATION;
320
321         nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
322         vmcs12->guest_physical_address = fault->address;
323 }
324
325 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
326 {
327         WARN_ON(mmu_is_nested(vcpu));
328
329         vcpu->arch.mmu = &vcpu->arch.guest_mmu;
330         kvm_init_shadow_ept_mmu(vcpu,
331                         to_vmx(vcpu)->nested.msrs.ept_caps &
332                         VMX_EPT_EXECUTE_ONLY_BIT,
333                         nested_ept_ad_enabled(vcpu),
334                         nested_ept_get_cr3(vcpu));
335         vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
336         vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
337         vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
338         vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;
339
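        /*
         * GVA->GPA translations for L2 are walked through the nested MMU,
         * while the shadow EPT MMU installed above handles GPA->HPA.
         */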
340         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
341 }
342
343 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
344 {
345         vcpu->arch.mmu = &vcpu->arch.root_mmu;
346         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
347 }
348
349 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
350                                             u16 error_code)
351 {
352         bool inequality, bit;
353
354         bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
355         inequality =
356                 (error_code & vmcs12->page_fault_error_code_mask) !=
357                  vmcs12->page_fault_error_code_match;
358         return inequality ^ bit;
359 }
360
361
362 /*
363  * KVM wants to inject page faults it received into the guest. This function
364  * checks whether, in a nested guest, they need to be injected into L1 or L2.
365  */
366 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
367 {
368         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
369         unsigned int nr = vcpu->arch.exception.nr;
370         bool has_payload = vcpu->arch.exception.has_payload;
371         unsigned long payload = vcpu->arch.exception.payload;
372
373         if (nr == PF_VECTOR) {
374                 if (vcpu->arch.exception.nested_apf) {
375                         *exit_qual = vcpu->arch.apf.nested_apf_token;
376                         return 1;
377                 }
378                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
379                                                     vcpu->arch.exception.error_code)) {
380                         *exit_qual = has_payload ? payload : vcpu->arch.cr2;
381                         return 1;
382                 }
383         } else if (vmcs12->exception_bitmap & (1u << nr)) {
384                 if (nr == DB_VECTOR) {
385                         if (!has_payload) {
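                                /*
                                 * Synthesize the exit qualification from DR6:
                                 * drop the always-set bits and BT, and flip
                                 * RTM, which has inverted polarity in DR6.
                                 */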
386                                 payload = vcpu->arch.dr6;
387                                 payload &= ~(DR6_FIXED_1 | DR6_BT);
388                                 payload ^= DR6_RTM;
389                         }
390                         *exit_qual = payload;
391                 } else
392                         *exit_qual = 0;
393                 return 1;
394         }
395
396         return 0;
397 }
398
399
400 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
401                 struct x86_exception *fault)
402 {
403         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
404
405         WARN_ON(!is_guest_mode(vcpu));
406
407         if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
408                 !to_vmx(vcpu)->nested.nested_run_pending) {
409                 vmcs12->vm_exit_intr_error_code = fault->error_code;
410                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
411                                   PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
412                                   INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
413                                   fault->address);
414         } else {
415                 kvm_inject_page_fault(vcpu, fault);
416         }
417 }
418
419 static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
420 {
421         return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
422 }
423
424 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
425                                                struct vmcs12 *vmcs12)
426 {
427         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
428                 return 0;
429
430         if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
431             !page_address_valid(vcpu, vmcs12->io_bitmap_b))
432                 return -EINVAL;
433
434         return 0;
435 }
436
437 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
438                                                 struct vmcs12 *vmcs12)
439 {
440         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
441                 return 0;
442
443         if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
444                 return -EINVAL;
445
446         return 0;
447 }
448
449 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
450                                                 struct vmcs12 *vmcs12)
451 {
452         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
453                 return 0;
454
455         if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
456                 return -EINVAL;
457
458         return 0;
459 }
460
461 /*
462  * Check whether a write to the given MSR is intercepted by the L01 MSR bitmap.
463  */
464 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
465 {
466         unsigned long *msr_bitmap;
467         int f = sizeof(unsigned long);
468
469         if (!cpu_has_vmx_msr_bitmap())
470                 return true;
471
472         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
473
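        /*
         * Write intercepts: the write-low bitmap at offset 0x800 covers MSRs
         * 0x0000-0x1fff, the write-high bitmap at offset 0xc00 covers MSRs
         * 0xc0000000-0xc0001fff.
         */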
474         if (msr <= 0x1fff) {
475                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
476         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
477                 msr &= 0x1fff;
478                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
479         }
480
481         return true;
482 }
483
484 /*
485  * If an MSR is allowed by L0, we should check whether it is allowed by L1.
486  * The corresponding intercept bit is cleared only if both L0 and L1 allow it.
487  */
488 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
489                                                unsigned long *msr_bitmap_nested,
490                                                u32 msr, int type)
491 {
492         int f = sizeof(unsigned long);
493
494         /*
495          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
496          * have the write-low and read-high bitmap offsets the wrong way round.
497          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
498          */
499         if (msr <= 0x1fff) {
500                 if (type & MSR_TYPE_R &&
501                    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
502                         /* read-low */
503                         __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
504
505                 if (type & MSR_TYPE_W &&
506                    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
507                         /* write-low */
508                         __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
509
510         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
511                 msr &= 0x1fff;
512                 if (type & MSR_TYPE_R &&
513                    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
514                         /* read-high */
515                         __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
516
517                 if (type & MSR_TYPE_W &&
518                    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
519                         /* write-high */
520                         __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
521
522         }
523 }
524
525 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
526         int msr;
527
528         for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
529                 unsigned word = msr / BITS_PER_LONG;
530
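                /*
                 * Set a full word in both the read-low (offset 0x000) and
                 * write-low (offset 0x800) bitmaps, covering this chunk of
                 * the x2APIC MSR range.
                 */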
531                 msr_bitmap[word] = ~0;
532                 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
533         }
534 }
535
536 /*
537  * Merge L0's and L1's MSR bitmaps; return false to indicate that
538  * the hardware MSR bitmap should not be used.
539  */
540 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
541                                                  struct vmcs12 *vmcs12)
542 {
543         int msr;
544         unsigned long *msr_bitmap_l1;
545         unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
546         struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
547
548         /* Nothing to do if the MSR bitmap is not in use.  */
549         if (!cpu_has_vmx_msr_bitmap() ||
550             !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
551                 return false;
552
553         if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
554                 return false;
555
556         msr_bitmap_l1 = (unsigned long *)map->hva;
557
558         /*
559          * To keep the control flow simple, pay eight 8-byte writes (sixteen
560          * 4-byte writes on 32-bit systems) up front to enable intercepts for
561          * the x2APIC MSR range and selectively disable them below.
562          */
563         enable_x2apic_msr_intercepts(msr_bitmap_l0);
564
565         if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
566                 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
567                         /*
568                          * L0 need not intercept reads for MSRs between 0x800
569                          * and 0x8ff, it just lets the processor take the value
570                          * from the virtual-APIC page; take those 256 bits
571                          * directly from the L1 bitmap.
572                          */
573                         for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
574                                 unsigned word = msr / BITS_PER_LONG;
575
576                                 msr_bitmap_l0[word] = msr_bitmap_l1[word];
577                         }
578                 }
579
580                 nested_vmx_disable_intercept_for_msr(
581                         msr_bitmap_l1, msr_bitmap_l0,
582                         X2APIC_MSR(APIC_TASKPRI),
583                         MSR_TYPE_R | MSR_TYPE_W);
584
585                 if (nested_cpu_has_vid(vmcs12)) {
586                         nested_vmx_disable_intercept_for_msr(
587                                 msr_bitmap_l1, msr_bitmap_l0,
588                                 X2APIC_MSR(APIC_EOI),
589                                 MSR_TYPE_W);
590                         nested_vmx_disable_intercept_for_msr(
591                                 msr_bitmap_l1, msr_bitmap_l0,
592                                 X2APIC_MSR(APIC_SELF_IPI),
593                                 MSR_TYPE_W);
594                 }
595         }
596
597         /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
598         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
599                                              MSR_FS_BASE, MSR_TYPE_RW);
600
601         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
602                                              MSR_GS_BASE, MSR_TYPE_RW);
603
604         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
605                                              MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
606
607         /*
608          * Checking the L0->L1 bitmap is trying to verify two things:
609          *
610          * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
611          *    ensures that we do not accidentally generate an L02 MSR bitmap
612          *    from the L12 MSR bitmap that is too permissive.
613          * 2. That L1 or L2s have actually used the MSR. This avoids
614          *    unnecessary merging of the bitmaps if the MSR is unused. This
615          *    works properly because we only update the L01 MSR bitmap lazily.
616          *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
617          *    updated to reflect this when L1 (or its L2s) actually write to
618          *    the MSR.
619          */
620         if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
621                 nested_vmx_disable_intercept_for_msr(
622                                         msr_bitmap_l1, msr_bitmap_l0,
623                                         MSR_IA32_SPEC_CTRL,
624                                         MSR_TYPE_R | MSR_TYPE_W);
625
626         if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
627                 nested_vmx_disable_intercept_for_msr(
628                                         msr_bitmap_l1, msr_bitmap_l0,
629                                         MSR_IA32_PRED_CMD,
630                                         MSR_TYPE_W);
631
632         kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
633
634         return true;
635 }
636
637 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
638                                        struct vmcs12 *vmcs12)
639 {
640         struct kvm_host_map map;
641         struct vmcs12 *shadow;
642
643         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
644             vmcs12->vmcs_link_pointer == -1ull)
645                 return;
646
647         shadow = get_shadow_vmcs12(vcpu);
648
649         if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
650                 return;
651
652         memcpy(shadow, map.hva, VMCS12_SIZE);
653         kvm_vcpu_unmap(vcpu, &map, false);
654 }
655
656 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
657                                               struct vmcs12 *vmcs12)
658 {
659         struct vcpu_vmx *vmx = to_vmx(vcpu);
660
661         if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
662             vmcs12->vmcs_link_pointer == -1ull)
663                 return;
664
665         kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
666                         get_shadow_vmcs12(vcpu), VMCS12_SIZE);
667 }
668
669 /*
670  * In nested virtualization, check whether L1 has set
671  * VM_EXIT_ACK_INTR_ON_EXIT.
672  */
673 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
674 {
675         return get_vmcs12(vcpu)->vm_exit_controls &
676                 VM_EXIT_ACK_INTR_ON_EXIT;
677 }
678
679 static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
680 {
681         return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
682 }
683
684 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
685                                           struct vmcs12 *vmcs12)
686 {
687         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
688             !page_address_valid(vcpu, vmcs12->apic_access_addr))
689                 return -EINVAL;
690         else
691                 return 0;
692 }
693
694 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
695                                            struct vmcs12 *vmcs12)
696 {
697         if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
698             !nested_cpu_has_apic_reg_virt(vmcs12) &&
699             !nested_cpu_has_vid(vmcs12) &&
700             !nested_cpu_has_posted_intr(vmcs12))
701                 return 0;
702
703         /*
704          * If virtualize x2apic mode is enabled,
705          * virtualize apic access must be disabled.
706          */
707         if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
708             nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
709                 return -EINVAL;
710
711         /*
712          * If virtual interrupt delivery is enabled,
713          * we must exit on external interrupts.
714          */
715         if (nested_cpu_has_vid(vmcs12) &&
716            !nested_exit_on_intr(vcpu))
717                 return -EINVAL;
718
719         /*
720          * Bits 15:8 of posted_intr_nv must be zero; the descriptor
721          * address has already been checked
722          * in nested_get_vmcs12_pages.
723          *
724          * Bits 5:0 of posted_intr_desc_addr must be zero.
725          */
726         if (nested_cpu_has_posted_intr(vmcs12) &&
727            (!nested_cpu_has_vid(vmcs12) ||
728             !nested_exit_intr_ack_set(vcpu) ||
729             (vmcs12->posted_intr_nv & 0xff00) ||
730             (vmcs12->posted_intr_desc_addr & 0x3f) ||
731             (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
732                 return -EINVAL;
733
734         /* tpr shadow is needed by all apicv features. */
735         if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
736                 return -EINVAL;
737
738         return 0;
739 }
740
741 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
742                                        u32 count, u64 addr)
743 {
744         int maxphyaddr;
745
746         if (count == 0)
747                 return 0;
748         maxphyaddr = cpuid_maxphyaddr(vcpu);
749         if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
750             (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
751                 return -EINVAL;
752
753         return 0;
754 }
755
756 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
757                                                      struct vmcs12 *vmcs12)
758 {
759         if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
760                                         vmcs12->vm_exit_msr_load_addr) ||
761             nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
762                                         vmcs12->vm_exit_msr_store_addr))
763                 return -EINVAL;
764
765         return 0;
766 }
767
768 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
769                                                       struct vmcs12 *vmcs12)
770 {
771         if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
772                                         vmcs12->vm_entry_msr_load_addr))
773                 return -EINVAL;
774
775         return 0;
776 }
777
778 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
779                                          struct vmcs12 *vmcs12)
780 {
781         if (!nested_cpu_has_pml(vmcs12))
782                 return 0;
783
784         if (!nested_cpu_has_ept(vmcs12) ||
785             !page_address_valid(vcpu, vmcs12->pml_address))
786                 return -EINVAL;
787
788         return 0;
789 }
790
791 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
792                                                         struct vmcs12 *vmcs12)
793 {
794         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
795             !nested_cpu_has_ept(vmcs12))
796                 return -EINVAL;
797         return 0;
798 }
799
800 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
801                                                          struct vmcs12 *vmcs12)
802 {
803         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
804             !nested_cpu_has_ept(vmcs12))
805                 return -EINVAL;
806         return 0;
807 }
808
809 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
810                                                  struct vmcs12 *vmcs12)
811 {
812         if (!nested_cpu_has_shadow_vmcs(vmcs12))
813                 return 0;
814
815         if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
816             !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
817                 return -EINVAL;
818
819         return 0;
820 }
821
822 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
823                                        struct vmx_msr_entry *e)
824 {
825         /* x2APIC MSR accesses are not allowed */
826         if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
827                 return -EINVAL;
828         if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
829             e->index == MSR_IA32_UCODE_REV)
830                 return -EINVAL;
831         if (e->reserved != 0)
832                 return -EINVAL;
833         return 0;
834 }
835
836 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
837                                      struct vmx_msr_entry *e)
838 {
839         if (e->index == MSR_FS_BASE ||
840             e->index == MSR_GS_BASE ||
841             e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
842             nested_vmx_msr_check_common(vcpu, e))
843                 return -EINVAL;
844         return 0;
845 }
846
847 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
848                                       struct vmx_msr_entry *e)
849 {
850         if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
851             nested_vmx_msr_check_common(vcpu, e))
852                 return -EINVAL;
853         return 0;
854 }
855
856 /*
857  * Load the guest's/host's MSRs at nested entry/exit.
858  * Returns 0 on success, or the 1-based index of the failing entry on failure.
859  */
860 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
861 {
862         u32 i;
863         struct vmx_msr_entry e;
864         struct msr_data msr;
865
866         msr.host_initiated = false;
867         for (i = 0; i < count; i++) {
868                 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
869                                         &e, sizeof(e))) {
870                         pr_debug_ratelimited(
871                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
872                                 __func__, i, gpa + i * sizeof(e));
873                         goto fail;
874                 }
875                 if (nested_vmx_load_msr_check(vcpu, &e)) {
876                         pr_debug_ratelimited(
877                                 "%s check failed (%u, 0x%x, 0x%x)\n",
878                                 __func__, i, e.index, e.reserved);
879                         goto fail;
880                 }
881                 msr.index = e.index;
882                 msr.data = e.value;
883                 if (kvm_set_msr(vcpu, &msr)) {
884                         pr_debug_ratelimited(
885                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
886                                 __func__, i, e.index, e.value);
887                         goto fail;
888                 }
889         }
890         return 0;
891 fail:
892         return i + 1;
893 }
894
895 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
896 {
897         u32 i;
898         struct vmx_msr_entry e;
899
900         for (i = 0; i < count; i++) {
901                 struct msr_data msr_info;
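                /*
                 * Only the index and reserved fields are needed here; the
                 * value is read from the vCPU and written back to the guest
                 * below.
                 */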
902                 if (kvm_vcpu_read_guest(vcpu,
903                                         gpa + i * sizeof(e),
904                                         &e, 2 * sizeof(u32))) {
905                         pr_debug_ratelimited(
906                                 "%s cannot read MSR entry (%u, 0x%08llx)\n",
907                                 __func__, i, gpa + i * sizeof(e));
908                         return -EINVAL;
909                 }
910                 if (nested_vmx_store_msr_check(vcpu, &e)) {
911                         pr_debug_ratelimited(
912                                 "%s check failed (%u, 0x%x, 0x%x)\n",
913                                 __func__, i, e.index, e.reserved);
914                         return -EINVAL;
915                 }
916                 msr_info.host_initiated = false;
917                 msr_info.index = e.index;
918                 if (kvm_get_msr(vcpu, &msr_info)) {
919                         pr_debug_ratelimited(
920                                 "%s cannot read MSR (%u, 0x%x)\n",
921                                 __func__, i, e.index);
922                         return -EINVAL;
923                 }
924                 if (kvm_vcpu_write_guest(vcpu,
925                                          gpa + i * sizeof(e) +
926                                              offsetof(struct vmx_msr_entry, value),
927                                          &msr_info.data, sizeof(msr_info.data))) {
928                         pr_debug_ratelimited(
929                                 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
930                                 __func__, i, e.index, msr_info.data);
931                         return -EINVAL;
932                 }
933         }
934         return 0;
935 }
936
937 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
938 {
939         unsigned long invalid_mask;
940
941         invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
942         return (val & invalid_mask) == 0;
943 }
944
945 /*
946  * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
947  * emulating VM entry into a guest with EPT enabled.
948  * Returns 0 on success, -EINVAL on failure. The invalid-state exit
949  * qualification code is assigned to *entry_failure_code on failure.
950  */
951 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
952                                u32 *entry_failure_code)
953 {
954         if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
955                 if (!nested_cr3_valid(vcpu, cr3)) {
956                         *entry_failure_code = ENTRY_FAIL_DEFAULT;
957                         return -EINVAL;
958                 }
959
960                 /*
961                  * If PAE paging and EPT are both on, CR3 is not used by the CPU and
962                  * must not be dereferenced.
963                  */
964                 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
965                     !nested_ept) {
966                         if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
967                                 *entry_failure_code = ENTRY_FAIL_PDPTE;
968                                 return -EINVAL;
969                         }
970                 }
971         }
972
973         if (!nested_ept)
974                 kvm_mmu_new_cr3(vcpu, cr3, false);
975
976         vcpu->arch.cr3 = cr3;
977         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
978
979         kvm_init_mmu(vcpu, false);
980
981         return 0;
982 }
983
984 /*
985  * Returns true if KVM is able to configure the CPU to tag TLB entries
986  * populated by L2 differently from TLB entries populated
987  * by L1.
988  *
989  * If L1 uses EPT, then TLB entries are tagged with different EPTP.
990  *
991  * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
992  * with different VPID (L1 entries are tagged with vmx->vpid
993  * while L2 entries are tagged with vmx->nested.vpid02).
994  */
995 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
996 {
997         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
998
999         return nested_cpu_has_ept(vmcs12) ||
1000                (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1001 }
1002
1003 static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
1004 {
1005         struct vcpu_vmx *vmx = to_vmx(vcpu);
1006
1007         return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
1008 }
1009
1010
1011 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
1012 {
1013         return fixed_bits_valid(control, low, high);
1014 }
1015
1016 static inline u64 vmx_control_msr(u32 low, u32 high)
1017 {
1018         return low | ((u64)high << 32);
1019 }
1020
1021 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1022 {
1023         superset &= mask;
1024         subset &= mask;
1025
1026         return (superset | subset) == superset;
1027 }
1028
1029 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1030 {
1031         const u64 feature_and_reserved =
1032                 /* feature (except bit 48; see below) */
1033                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1034                 /* reserved */
1035                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1036         u64 vmx_basic = vmx->nested.msrs.basic;
1037
1038         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1039                 return -EINVAL;
1040
1041         /*
1042          * KVM does not emulate a version of VMX that constrains physical
1043          * addresses of VMX structures (e.g. VMCS) to 32-bits.
1044          */
1045         if (data & BIT_ULL(48))
1046                 return -EINVAL;
1047
1048         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1049             vmx_basic_vmcs_revision_id(data))
1050                 return -EINVAL;
1051
1052         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1053                 return -EINVAL;
1054
1055         vmx->nested.msrs.basic = data;
1056         return 0;
1057 }
1058
1059 static int
1060 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1061 {
1062         u64 supported;
1063         u32 *lowp, *highp;
1064
1065         switch (msr_index) {
1066         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1067                 lowp = &vmx->nested.msrs.pinbased_ctls_low;
1068                 highp = &vmx->nested.msrs.pinbased_ctls_high;
1069                 break;
1070         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1071                 lowp = &vmx->nested.msrs.procbased_ctls_low;
1072                 highp = &vmx->nested.msrs.procbased_ctls_high;
1073                 break;
1074         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1075                 lowp = &vmx->nested.msrs.exit_ctls_low;
1076                 highp = &vmx->nested.msrs.exit_ctls_high;
1077                 break;
1078         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1079                 lowp = &vmx->nested.msrs.entry_ctls_low;
1080                 highp = &vmx->nested.msrs.entry_ctls_high;
1081                 break;
1082         case MSR_IA32_VMX_PROCBASED_CTLS2:
1083                 lowp = &vmx->nested.msrs.secondary_ctls_low;
1084                 highp = &vmx->nested.msrs.secondary_ctls_high;
1085                 break;
1086         default:
1087                 BUG();
1088         }
1089
1090         supported = vmx_control_msr(*lowp, *highp);
1091
1092         /* Check must-be-1 bits are still 1. */
1093         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1094                 return -EINVAL;
1095
1096         /* Check must-be-0 bits are still 0. */
1097         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1098                 return -EINVAL;
1099
1100         *lowp = data;
1101         *highp = data >> 32;
1102         return 0;
1103 }
1104
1105 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1106 {
1107         const u64 feature_and_reserved_bits =
1108                 /* feature */
1109                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1110                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1111                 /* reserved */
1112                 GENMASK_ULL(13, 9) | BIT_ULL(31);
1113         u64 vmx_misc;
1114
1115         vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
1116                                    vmx->nested.msrs.misc_high);
1117
1118         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1119                 return -EINVAL;
1120
1121         if ((vmx->nested.msrs.pinbased_ctls_high &
1122              PIN_BASED_VMX_PREEMPTION_TIMER) &&
1123             vmx_misc_preemption_timer_rate(data) !=
1124             vmx_misc_preemption_timer_rate(vmx_misc))
1125                 return -EINVAL;
1126
1127         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1128                 return -EINVAL;
1129
1130         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1131                 return -EINVAL;
1132
1133         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1134                 return -EINVAL;
1135
1136         vmx->nested.msrs.misc_low = data;
1137         vmx->nested.msrs.misc_high = data >> 32;
1138
1139         return 0;
1140 }
1141
1142 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1143 {
1144         u64 vmx_ept_vpid_cap;
1145
1146         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
1147                                            vmx->nested.msrs.vpid_caps);
1148
1149         /* Every bit is either reserved or a feature bit. */
1150         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1151                 return -EINVAL;
1152
1153         vmx->nested.msrs.ept_caps = data;
1154         vmx->nested.msrs.vpid_caps = data >> 32;
1155         return 0;
1156 }
1157
1158 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1159 {
1160         u64 *msr;
1161
1162         switch (msr_index) {
1163         case MSR_IA32_VMX_CR0_FIXED0:
1164                 msr = &vmx->nested.msrs.cr0_fixed0;
1165                 break;
1166         case MSR_IA32_VMX_CR4_FIXED0:
1167                 msr = &vmx->nested.msrs.cr4_fixed0;
1168                 break;
1169         default:
1170                 BUG();
1171         }
1172
1173         /*
1174          * 1 bits (which indicate bits which "must-be-1" during VMX operation)
1175          * must be 1 in the restored value.
1176          */
1177         if (!is_bitwise_subset(data, *msr, -1ULL))
1178                 return -EINVAL;
1179
1180         *msr = data;
1181         return 0;
1182 }
1183
1184 /*
1185  * Called when userspace is restoring VMX MSRs.
1186  *
1187  * Returns 0 on success, non-0 otherwise.
1188  */
1189 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1190 {
1191         struct vcpu_vmx *vmx = to_vmx(vcpu);
1192
1193         /*
1194          * Don't allow changes to the VMX capability MSRs while the vCPU
1195          * is in VMX operation.
1196          */
1197         if (vmx->nested.vmxon)
1198                 return -EBUSY;
1199
1200         switch (msr_index) {
1201         case MSR_IA32_VMX_BASIC:
1202                 return vmx_restore_vmx_basic(vmx, data);
1203         case MSR_IA32_VMX_PINBASED_CTLS:
1204         case MSR_IA32_VMX_PROCBASED_CTLS:
1205         case MSR_IA32_VMX_EXIT_CTLS:
1206         case MSR_IA32_VMX_ENTRY_CTLS:
1207                 /*
1208                  * The "non-true" VMX capability MSRs are generated from the
1209                  * "true" MSRs, so we do not support restoring them directly.
1210                  *
1211                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1212                  * should restore the "true" MSRs with the must-be-1 bits
1213                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1214                  * DEFAULT SETTINGS".
1215                  */
1216                 return -EINVAL;
1217         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1218         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1219         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1220         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1221         case MSR_IA32_VMX_PROCBASED_CTLS2:
1222                 return vmx_restore_control_msr(vmx, msr_index, data);
1223         case MSR_IA32_VMX_MISC:
1224                 return vmx_restore_vmx_misc(vmx, data);
1225         case MSR_IA32_VMX_CR0_FIXED0:
1226         case MSR_IA32_VMX_CR4_FIXED0:
1227                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1228         case MSR_IA32_VMX_CR0_FIXED1:
1229         case MSR_IA32_VMX_CR4_FIXED1:
1230                 /*
1231                  * These MSRs are generated based on the vCPU's CPUID, so we
1232                  * do not support restoring them directly.
1233                  */
1234                 return -EINVAL;
1235         case MSR_IA32_VMX_EPT_VPID_CAP:
1236                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1237         case MSR_IA32_VMX_VMCS_ENUM:
1238                 vmx->nested.msrs.vmcs_enum = data;
1239                 return 0;
1240         default:
1241                 /*
1242                  * The rest of the VMX capability MSRs do not support restore.
1243                  */
1244                 return -EINVAL;
1245         }
1246 }
1247
1248 /* Returns 0 on success, non-0 otherwise. */
1249 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1250 {
1251         switch (msr_index) {
1252         case MSR_IA32_VMX_BASIC:
1253                 *pdata = msrs->basic;
1254                 break;
1255         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1256         case MSR_IA32_VMX_PINBASED_CTLS:
1257                 *pdata = vmx_control_msr(
1258                         msrs->pinbased_ctls_low,
1259                         msrs->pinbased_ctls_high);
1260                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1261                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1262                 break;
1263         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1264         case MSR_IA32_VMX_PROCBASED_CTLS:
1265                 *pdata = vmx_control_msr(
1266                         msrs->procbased_ctls_low,
1267                         msrs->procbased_ctls_high);
1268                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1269                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1270                 break;
1271         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1272         case MSR_IA32_VMX_EXIT_CTLS:
1273                 *pdata = vmx_control_msr(
1274                         msrs->exit_ctls_low,
1275                         msrs->exit_ctls_high);
1276                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1277                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1278                 break;
1279         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1280         case MSR_IA32_VMX_ENTRY_CTLS:
1281                 *pdata = vmx_control_msr(
1282                         msrs->entry_ctls_low,
1283                         msrs->entry_ctls_high);
1284                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1285                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1286                 break;
1287         case MSR_IA32_VMX_MISC:
1288                 *pdata = vmx_control_msr(
1289                         msrs->misc_low,
1290                         msrs->misc_high);
1291                 break;
1292         case MSR_IA32_VMX_CR0_FIXED0:
1293                 *pdata = msrs->cr0_fixed0;
1294                 break;
1295         case MSR_IA32_VMX_CR0_FIXED1:
1296                 *pdata = msrs->cr0_fixed1;
1297                 break;
1298         case MSR_IA32_VMX_CR4_FIXED0:
1299                 *pdata = msrs->cr4_fixed0;
1300                 break;
1301         case MSR_IA32_VMX_CR4_FIXED1:
1302                 *pdata = msrs->cr4_fixed1;
1303                 break;
1304         case MSR_IA32_VMX_VMCS_ENUM:
1305                 *pdata = msrs->vmcs_enum;
1306                 break;
1307         case MSR_IA32_VMX_PROCBASED_CTLS2:
1308                 *pdata = vmx_control_msr(
1309                         msrs->secondary_ctls_low,
1310                         msrs->secondary_ctls_high);
1311                 break;
1312         case MSR_IA32_VMX_EPT_VPID_CAP:
1313                 *pdata = msrs->ept_caps |
1314                         ((u64)msrs->vpid_caps << 32);
1315                 break;
1316         case MSR_IA32_VMX_VMFUNC:
1317                 *pdata = msrs->vmfunc_controls;
1318                 break;
1319         default:
1320                 return 1;
1321         }
1322
1323         return 0;
1324 }
1325
1326 /*
1327  * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1328  * been modified by the L1 guest.  Note, "writable" in this context means
1329  * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1330  * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1331  * VM-exit information fields (which are actually writable if the vCPU is
1332  * configured to support "VMWRITE to any supported field in the VMCS").
1333  */
1334 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1335 {
1336         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1337         struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1338         struct shadow_vmcs_field field;
1339         unsigned long val;
1340         int i;
1341
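        /*
         * Disable preemption: vmcs_load() temporarily makes the shadow VMCS
         * the current VMCS on this CPU, and loaded_vmcs must be restored
         * before another task can run here.
         */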
1342         preempt_disable();
1343
1344         vmcs_load(shadow_vmcs);
1345
1346         for (i = 0; i < max_shadow_read_write_fields; i++) {
1347                 field = shadow_read_write_fields[i];
1348                 val = __vmcs_readl(field.encoding);
1349                 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1350         }
1351
1352         vmcs_clear(shadow_vmcs);
1353         vmcs_load(vmx->loaded_vmcs->vmcs);
1354
1355         preempt_enable();
1356 }
1357
1358 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1359 {
1360         const struct shadow_vmcs_field *fields[] = {
1361                 shadow_read_write_fields,
1362                 shadow_read_only_fields
1363         };
1364         const int max_fields[] = {
1365                 max_shadow_read_write_fields,
1366                 max_shadow_read_only_fields
1367         };
1368         struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1369         struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1370         struct shadow_vmcs_field field;
1371         unsigned long val;
1372         int i, q;
1373
1374         vmcs_load(shadow_vmcs);
1375
1376         for (q = 0; q < ARRAY_SIZE(fields); q++) {
1377                 for (i = 0; i < max_fields[q]; i++) {
1378                         field = fields[q][i];
1379                         val = vmcs12_read_any(vmcs12, field.encoding,
1380                                               field.offset);
1381                         __vmcs_writel(field.encoding, val);
1382                 }
1383         }
1384
1385         vmcs_clear(shadow_vmcs);
1386         vmcs_load(vmx->loaded_vmcs->vmcs);
1387 }
1388
1389 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1390 {
1391         struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1392         struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1393
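        /*
         * A clear bit in hv_clean_fields means the corresponding group of
         * fields may have been modified by L1 since the last sync and must
         * be copied from the enlightened VMCS.
         */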
1394         /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1395         vmcs12->tpr_threshold = evmcs->tpr_threshold;
1396         vmcs12->guest_rip = evmcs->guest_rip;
1397
1398         if (unlikely(!(evmcs->hv_clean_fields &
1399                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1400                 vmcs12->guest_rsp = evmcs->guest_rsp;
1401                 vmcs12->guest_rflags = evmcs->guest_rflags;
1402                 vmcs12->guest_interruptibility_info =
1403                         evmcs->guest_interruptibility_info;
1404         }
1405
1406         if (unlikely(!(evmcs->hv_clean_fields &
1407                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1408                 vmcs12->cpu_based_vm_exec_control =
1409                         evmcs->cpu_based_vm_exec_control;
1410         }
1411
1412         if (unlikely(!(evmcs->hv_clean_fields &
1413                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1414                 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1415         }
1416
1417         if (unlikely(!(evmcs->hv_clean_fields &
1418                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1419                 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1420         }
1421
1422         if (unlikely(!(evmcs->hv_clean_fields &
1423                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1424                 vmcs12->vm_entry_intr_info_field =
1425                         evmcs->vm_entry_intr_info_field;
1426                 vmcs12->vm_entry_exception_error_code =
1427                         evmcs->vm_entry_exception_error_code;
1428                 vmcs12->vm_entry_instruction_len =
1429                         evmcs->vm_entry_instruction_len;
1430         }
1431
1432         if (unlikely(!(evmcs->hv_clean_fields &
1433                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1434                 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1435                 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1436                 vmcs12->host_cr0 = evmcs->host_cr0;
1437                 vmcs12->host_cr3 = evmcs->host_cr3;
1438                 vmcs12->host_cr4 = evmcs->host_cr4;
1439                 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1440                 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1441                 vmcs12->host_rip = evmcs->host_rip;
1442                 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1443                 vmcs12->host_es_selector = evmcs->host_es_selector;
1444                 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1445                 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1446                 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1447                 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1448                 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1449                 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1450         }
1451
1452         if (unlikely(!(evmcs->hv_clean_fields &
1453                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1454                 vmcs12->pin_based_vm_exec_control =
1455                         evmcs->pin_based_vm_exec_control;
1456                 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1457                 vmcs12->secondary_vm_exec_control =
1458                         evmcs->secondary_vm_exec_control;
1459         }
1460
1461         if (unlikely(!(evmcs->hv_clean_fields &
1462                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1463                 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1464                 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1465         }
1466
1467         if (unlikely(!(evmcs->hv_clean_fields &
1468                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1469                 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1470         }
1471
1472         if (unlikely(!(evmcs->hv_clean_fields &
1473                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1474                 vmcs12->guest_es_base = evmcs->guest_es_base;
1475                 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1476                 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1477                 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1478                 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1479                 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1480                 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1481                 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1482                 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1483                 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1484                 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1485                 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1486                 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1487                 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1488                 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1489                 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1490                 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1491                 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1492                 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1493                 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1494                 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1495                 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1496                 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1497                 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1498                 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1499                 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1500                 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1501                 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1502                 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1503                 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1504                 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1505                 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1506                 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1507                 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1508                 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1509                 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1510         }
1511
1512         if (unlikely(!(evmcs->hv_clean_fields &
1513                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1514                 vmcs12->tsc_offset = evmcs->tsc_offset;
1515                 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1516                 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1517         }
1518
1519         if (unlikely(!(evmcs->hv_clean_fields &
1520                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1521                 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1522                 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1523                 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1524                 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1525                 vmcs12->guest_cr0 = evmcs->guest_cr0;
1526                 vmcs12->guest_cr3 = evmcs->guest_cr3;
1527                 vmcs12->guest_cr4 = evmcs->guest_cr4;
1528                 vmcs12->guest_dr7 = evmcs->guest_dr7;
1529         }
1530
1531         if (unlikely(!(evmcs->hv_clean_fields &
1532                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1533                 vmcs12->host_fs_base = evmcs->host_fs_base;
1534                 vmcs12->host_gs_base = evmcs->host_gs_base;
1535                 vmcs12->host_tr_base = evmcs->host_tr_base;
1536                 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1537                 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1538                 vmcs12->host_rsp = evmcs->host_rsp;
1539         }
1540
1541         if (unlikely(!(evmcs->hv_clean_fields &
1542                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1543                 vmcs12->ept_pointer = evmcs->ept_pointer;
1544                 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1545         }
1546
1547         if (unlikely(!(evmcs->hv_clean_fields &
1548                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1549                 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1550                 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1551                 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1552                 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1553                 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1554                 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1555                 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1556                 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1557                 vmcs12->guest_pending_dbg_exceptions =
1558                         evmcs->guest_pending_dbg_exceptions;
1559                 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1560                 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1561                 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1562                 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1563                 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1564         }
1565
1566         /*
1567          * Not used?
1568          * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1569          * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1570          * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1571          * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1572          * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1573          * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1574          * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1575          * vmcs12->page_fault_error_code_mask =
1576          *              evmcs->page_fault_error_code_mask;
1577          * vmcs12->page_fault_error_code_match =
1578          *              evmcs->page_fault_error_code_match;
1579          * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1580          * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1581          * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1582          * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1583          */
1584
1585         /*
1586          * Read only fields:
1587          * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1588          * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1589          * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1590          * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1591          * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1592          * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1593          * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1594          * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1595          * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1596          * vmcs12->exit_qualification = evmcs->exit_qualification;
1597          * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1598          *
1599          * Not present in struct vmcs12:
1600          * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1601          * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1602          * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1603          * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1604          */
1605
1606         return 0;
1607 }
1608
1609 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1610 {
1611         struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1612         struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1613
1614         /*
1615          * Should not be changed by KVM:
1616          *
1617          * evmcs->host_es_selector = vmcs12->host_es_selector;
1618          * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1619          * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1620          * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1621          * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1622          * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1623          * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1624          * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1625          * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1626          * evmcs->host_cr0 = vmcs12->host_cr0;
1627          * evmcs->host_cr3 = vmcs12->host_cr3;
1628          * evmcs->host_cr4 = vmcs12->host_cr4;
1629          * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1630          * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1631          * evmcs->host_rip = vmcs12->host_rip;
1632          * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1633          * evmcs->host_fs_base = vmcs12->host_fs_base;
1634          * evmcs->host_gs_base = vmcs12->host_gs_base;
1635          * evmcs->host_tr_base = vmcs12->host_tr_base;
1636          * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1637          * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1638          * evmcs->host_rsp = vmcs12->host_rsp;
1639          * sync_vmcs02_to_vmcs12() doesn't read these:
1640          * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1641          * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1642          * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1643          * evmcs->ept_pointer = vmcs12->ept_pointer;
1644          * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1645          * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1646          * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1647          * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1648          * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1649          * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1650          * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1651          * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1652          * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1653          * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1654          * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1655          * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1656          * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1657          * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1658          * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1659          * evmcs->page_fault_error_code_mask =
1660          *              vmcs12->page_fault_error_code_mask;
1661          * evmcs->page_fault_error_code_match =
1662          *              vmcs12->page_fault_error_code_match;
1663          * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1664          * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1665          * evmcs->tsc_offset = vmcs12->tsc_offset;
1666          * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1667          * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1668          * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1669          * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1670          * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1671          * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1672          * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1673          * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1674          *
1675          * Not present in struct vmcs12:
1676          * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1677          * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1678          * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1679          * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1680          */
1681
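             /*
              * Everything below may have been refreshed by
              * sync_vmcs02_to_vmcs12() and therefore must be copied back to
              * the enlightened VMCS for L1 to see.
              */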
1682         evmcs->guest_es_selector = vmcs12->guest_es_selector;
1683         evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1684         evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1685         evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1686         evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1687         evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1688         evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1689         evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1690
1691         evmcs->guest_es_limit = vmcs12->guest_es_limit;
1692         evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1693         evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1694         evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1695         evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1696         evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1697         evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1698         evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1699         evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1700         evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1701
1702         evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1703         evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1704         evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1705         evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1706         evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1707         evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1708         evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1709         evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1710
1711         evmcs->guest_es_base = vmcs12->guest_es_base;
1712         evmcs->guest_cs_base = vmcs12->guest_cs_base;
1713         evmcs->guest_ss_base = vmcs12->guest_ss_base;
1714         evmcs->guest_ds_base = vmcs12->guest_ds_base;
1715         evmcs->guest_fs_base = vmcs12->guest_fs_base;
1716         evmcs->guest_gs_base = vmcs12->guest_gs_base;
1717         evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1718         evmcs->guest_tr_base = vmcs12->guest_tr_base;
1719         evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1720         evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1721
1722         evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1723         evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1724
1725         evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1726         evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1727         evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1728         evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1729
1730         evmcs->guest_pending_dbg_exceptions =
1731                 vmcs12->guest_pending_dbg_exceptions;
1732         evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1733         evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1734
1735         evmcs->guest_activity_state = vmcs12->guest_activity_state;
1736         evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1737
1738         evmcs->guest_cr0 = vmcs12->guest_cr0;
1739         evmcs->guest_cr3 = vmcs12->guest_cr3;
1740         evmcs->guest_cr4 = vmcs12->guest_cr4;
1741         evmcs->guest_dr7 = vmcs12->guest_dr7;
1742
1743         evmcs->guest_physical_address = vmcs12->guest_physical_address;
1744
1745         evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1746         evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1747         evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1748         evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1749         evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1750         evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1751         evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1752         evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1753
1754         evmcs->exit_qualification = vmcs12->exit_qualification;
1755
1756         evmcs->guest_linear_address = vmcs12->guest_linear_address;
1757         evmcs->guest_rsp = vmcs12->guest_rsp;
1758         evmcs->guest_rflags = vmcs12->guest_rflags;
1759
1760         evmcs->guest_interruptibility_info =
1761                 vmcs12->guest_interruptibility_info;
1762         evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1763         evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1764         evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1765         evmcs->vm_entry_exception_error_code =
1766                 vmcs12->vm_entry_exception_error_code;
1767         evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1768
1769         evmcs->guest_rip = vmcs12->guest_rip;
1770
1771         evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1772
1773         return 0;
1774 }
1775
1776 /*
1777  * This is the equivalent of the nested hypervisor executing the vmptrld
1778  * instruction.
1779  */
1780 static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
1781                                                  bool from_launch)
1782 {
1783         struct vcpu_vmx *vmx = to_vmx(vcpu);
1784         struct hv_vp_assist_page assist_page;
1785
1786         if (likely(!vmx->nested.enlightened_vmcs_enabled))
1787                 return 1;
1788
1789         if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
1790                 return 1;
1791
1792         if (unlikely(!assist_page.enlighten_vmentry))
1793                 return 1;
1794
1795         if (unlikely(assist_page.current_nested_vmcs !=
1796                      vmx->nested.hv_evmcs_vmptr)) {
1797
1798                 if (!vmx->nested.hv_evmcs)
1799                         vmx->nested.current_vmptr = -1ull;
1800
1801                 nested_release_evmcs(vcpu);
1802
1803                 if (kvm_vcpu_map(vcpu, gpa_to_gfn(assist_page.current_nested_vmcs),
1804                                  &vmx->nested.hv_evmcs_map))
1805                         return 0;
1806
1807                 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
1808
1809                 /*
1810                  * Currently, KVM only supports eVMCS version 1
1811                  * (== KVM_EVMCS_VERSION), so the guest is expected to write
1812                  * this value to the first u32 field of the eVMCS, which
1813                  * specifies the eVMCS VersionNumber.
1814                  *
1815                  * The guest learns which eVMCS versions the host supports by
1816                  * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM
1817                  * is expected to set this CPUID leaf according to the value
1818                  * returned in vmcs_version from nested_enable_evmcs().
1819                  *
1820                  * However, it turns out that Microsoft Hyper-V fails to comply
1821                  * with its own invented interface: when Hyper-V uses eVMCS, it
1822                  * simply sets the first u32 field of the eVMCS to the
1823                  * revision_id specified in MSR_IA32_VMX_BASIC instead of to a
1824                  * supported eVMCS version number from
1825                  * CPUID.0x4000000A.EAX[0:15].
1826                  *
1827                  * To work around this Hyper-V bug, accept either a supported
1828                  * eVMCS version or the VMCS12 revision_id as a valid value for
1829                  * the first u32 field of the eVMCS.
1830                  */
1831                 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
1832                     (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
1833                         nested_release_evmcs(vcpu);
1834                         return 0;
1835                 }
1836
1837                 vmx->nested.dirty_vmcs12 = true;
1838                 /*
1839                  * As we keep L2 state for one guest only 'hv_clean_fields' mask
1840                  * can't be used when we switch between them. Reset it here for
1841                  * simplicity.
1842                  */
1843                 vmx->nested.hv_evmcs->hv_clean_fields &=
1844                         ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1845                 vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
1846
1847                 /*
1848                  * Unlike normal vmcs12, enlightened vmcs12 is not fully
1849                  * reloaded from the guest's memory (read-only fields, fields not
1850                  * present in struct hv_enlightened_vmcs, ...). Make sure there
1851                  * are no leftovers.
1852                  */
1853                 if (from_launch) {
1854                         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1855                         memset(vmcs12, 0, sizeof(*vmcs12));
1856                         vmcs12->hdr.revision_id = VMCS12_REVISION;
1857                 }
1858
1859         }
1860         return 1;
1861 }
1862
1863 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
1864 {
1865         struct vcpu_vmx *vmx = to_vmx(vcpu);
1866
1867         /*
1868          * hv_evmcs may no longer be mapped after migration (when L2 was
1869          * running); map it here to make sure vmcs12 changes are properly
1870          * reflected.
1871          */
1872         if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
1873                 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
1874
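             /*
              * Flush the cached vmcs12 back to whatever structure L1 actually
              * reads: the enlightened VMCS if one is active, otherwise the
              * shadow VMCS.
              */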
1875         if (vmx->nested.hv_evmcs) {
1876                 copy_vmcs12_to_enlightened(vmx);
1877                 /* All fields are clean */
1878                 vmx->nested.hv_evmcs->hv_clean_fields |=
1879                         HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
1880         } else {
1881                 copy_vmcs12_to_shadow(vmx);
1882         }
1883
1884         vmx->nested.need_vmcs12_to_shadow_sync = false;
1885 }
1886
1887 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
1888 {
1889         struct vcpu_vmx *vmx =
1890                 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
1891
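             /*
              * Just record the expiry and kick the vCPU; the preemption-timer
              * VM-exit to L1 is injected later from the nested event handling
              * path.
              */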
1892         vmx->nested.preemption_timer_expired = true;
1893         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
1894         kvm_vcpu_kick(&vmx->vcpu);
1895
1896         return HRTIMER_NORESTART;
1897 }
1898
1899 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
1900 {
1901         u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
1902         struct vcpu_vmx *vmx = to_vmx(vcpu);
1903
1904         /*
1905          * A timer value of zero is architecturally guaranteed to cause
1906          * a VMExit prior to executing any instructions in the guest.
1907          */
1908         if (preemption_timeout == 0) {
1909                 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
1910                 return;
1911         }
1912
1913         if (vcpu->arch.virtual_tsc_khz == 0)
1914                 return;
1915
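             /*
              * The vmcs12 value counts in units of
              * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC cycles (the rate
              * KVM advertises in IA32_VMX_MISC).  Convert it to nanoseconds:
              * cycles * 10^6 / tsc_khz.
              */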
1916         preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
1917         preemption_timeout *= 1000000;
1918         do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
1919         hrtimer_start(&vmx->nested.preemption_timer,
1920                       ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
1921 }
1922
1923 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1924 {
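             /*
              * If this VM-entry loads EFER from vmcs12, use that value;
              * otherwise keep L1's EFER but derive LMA/LME from the
              * "IA-32e mode guest" VM-entry control.
              */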
1925         if (vmx->nested.nested_run_pending &&
1926             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
1927                 return vmcs12->guest_ia32_efer;
1928         else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
1929                 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
1930         else
1931                 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
1932 }
1933
1934 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
1935 {
1936         /*
1937          * If vmcs02 hasn't been initialized, set the constant vmcs02 state
1938          * according to L0's settings (vmcs12 is irrelevant here).  Host
1939          * fields that come from L0 and are not constant, e.g. HOST_CR3,
1940          * will be set as needed prior to VMLAUNCH/VMRESUME.
1941          */
1942         if (vmx->nested.vmcs02_initialized)
1943                 return;
1944         vmx->nested.vmcs02_initialized = true;
1945
1946         /*
1947          * We don't care what the EPTP value is; we just need to guarantee
1948          * it's valid so we don't get a false positive when doing early
1949          * consistency checks.
1950          */
1951         if (enable_ept && nested_early_check)
1952                 vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
1953
1954         /* All VMFUNCs are currently emulated through L0 vmexits.  */
1955         if (cpu_has_vmx_vmfunc())
1956                 vmcs_write64(VM_FUNCTION_CONTROL, 0);
1957
1958         if (cpu_has_vmx_posted_intr())
1959                 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
1960
1961         if (cpu_has_vmx_msr_bitmap())
1962                 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
1963
1964         /*
1965          * The PML address never changes, so it is constant in vmcs02.
1966          * Conceptually we want to copy the PML index from vmcs01 here,
1967          * and then back to vmcs01 on nested vmexit.  But since we flush
1968          * the log and reset GUEST_PML_INDEX on each vmexit, the PML
1969          * index is also effectively constant in vmcs02.
1970          */
1971         if (enable_pml) {
1972                 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
1973                 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
1974         }
1975
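             /* Setting all bits intercepts every ENCLS leaf executed by L2. */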
1976         if (cpu_has_vmx_encls_vmexit())
1977                 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
1978
1979         /*
1980          * Set the MSR load/store lists to match L0's settings.  Only the
1981          * addresses are constant (for vmcs02), the counts can change based
1982          * on L2's behavior, e.g. switching to/from long mode.
1983          */
1984         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1985         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
1986         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
1987
1988         vmx_set_constant_host_state(vmx);
1989 }
1990
1991 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
1992                                       struct vmcs12 *vmcs12)
1993 {
1994         prepare_vmcs02_constant_state(vmx);
1995
1996         vmcs_write64(VMCS_LINK_POINTER, -1ull);
1997
1998         if (enable_vpid) {
1999                 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2000                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2001                 else
2002                         vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2003         }
2004 }
2005
2006 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2007 {
2008         u32 exec_control, vmcs12_exec_ctrl;
2009         u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2010
2011         if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
2012                 prepare_vmcs02_early_rare(vmx, vmcs12);
2013
2014         /*
2015          * PIN CONTROLS
2016          */
2017         exec_control = vmcs12->pin_based_vm_exec_control;
2018
2019         /* Preemption timer setting is computed directly in vmx_vcpu_run.  */
2020         exec_control |= vmcs_config.pin_based_exec_ctrl;
2021         exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2022         vmx->loaded_vmcs->hv_timer_armed = false;
2023
2024         /* Posted interrupts setting is only taken from vmcs12.  */
2025         if (nested_cpu_has_posted_intr(vmcs12)) {
2026                 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2027                 vmx->nested.pi_pending = false;
2028         } else {
2029                 exec_control &= ~PIN_BASED_POSTED_INTR;
2030         }
2031         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
2032
2033         /*
2034          * EXEC CONTROLS
2035          */
2036         exec_control = vmx_exec_control(vmx); /* L0's desires */
2037         exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2038         exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2039         exec_control &= ~CPU_BASED_TPR_SHADOW;
2040         exec_control |= vmcs12->cpu_based_vm_exec_control;
2041
2042         if (exec_control & CPU_BASED_TPR_SHADOW)
2043                 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2044 #ifdef CONFIG_X86_64
2045         else
2046                 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2047                                 CPU_BASED_CR8_STORE_EXITING;
2048 #endif
2049
2050         /*
2051          * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2052          * for I/O port accesses.
2053          */
2054         exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2055         exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2056         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2057
2058         /*
2059          * SECONDARY EXEC CONTROLS
2060          */
2061         if (cpu_has_secondary_exec_ctrls()) {
2062                 exec_control = vmx->secondary_exec_control;
2063
2064                 /* Take the following fields only from vmcs12 */
2065                 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2066                                   SECONDARY_EXEC_ENABLE_INVPCID |
2067                                   SECONDARY_EXEC_RDTSCP |
2068                                   SECONDARY_EXEC_XSAVES |
2069                                   SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2070                                   SECONDARY_EXEC_APIC_REGISTER_VIRT |
2071                                   SECONDARY_EXEC_ENABLE_VMFUNC);
2072                 if (nested_cpu_has(vmcs12,
2073                                    CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2074                         vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2075                                 ~SECONDARY_EXEC_ENABLE_PML;
2076                         exec_control |= vmcs12_exec_ctrl;
2077                 }
2078
2079                 /* VMCS shadowing for L2 is emulated for now */
2080                 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2081
2082                 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2083                         vmcs_write16(GUEST_INTR_STATUS,
2084                                 vmcs12->guest_intr_status);
2085
2086                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2087         }
2088
2089         /*
2090          * ENTRY CONTROLS
2091          *
2092          * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2093          * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2094          * on the related bits (if supported by the CPU) in the hope that
2095          * we can avoid VMWrites during vmx_set_efer().
2096          */
2097         exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2098                         ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2099         if (cpu_has_load_ia32_efer()) {
2100                 if (guest_efer & EFER_LMA)
2101                         exec_control |= VM_ENTRY_IA32E_MODE;
2102                 if (guest_efer != host_efer)
2103                         exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2104         }
2105         vm_entry_controls_init(vmx, exec_control);
2106
2107         /*
2108          * EXIT CONTROLS
2109          *
2110          * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2111          * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2112          * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2113          */
2114         exec_control = vmx_vmexit_ctrl();
2115         if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2116                 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2117         vm_exit_controls_init(vmx, exec_control);
2118
2119         /*
2120          * Interrupt/Exception Fields
2121          */
2122         if (vmx->nested.nested_run_pending) {
2123                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2124                              vmcs12->vm_entry_intr_info_field);
2125                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2126                              vmcs12->vm_entry_exception_error_code);
2127                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2128                              vmcs12->vm_entry_instruction_len);
2129                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2130                              vmcs12->guest_interruptibility_info);
2131                 vmx->loaded_vmcs->nmi_known_unmasked =
2132                         !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2133         } else {
2134                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2135         }
2136 }
2137
2138 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2139 {
2140         struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2141
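             /*
              * When an enlightened VMCS is in use, skip writing any group of
              * guest fields that L1 has marked clean; those vmcs12 values have
              * not changed since they were last written into vmcs02.
              */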
2142         if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2143                            HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2144                 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2145                 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2146                 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2147                 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2148                 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2149                 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2150                 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2151                 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2152                 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2153                 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2154                 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2155                 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2156                 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2157                 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2158                 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2159                 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2160                 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2161                 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2162                 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2163                 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2164                 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2165                 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2166                 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2167                 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2168                 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2169                 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2170                 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2171                 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2172                 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2173                 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2174                 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2175                 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2176                 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2177                 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2178                 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2179                 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2180         }
2181
2182         if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2183                            HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2184                 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2185                 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2186                             vmcs12->guest_pending_dbg_exceptions);
2187                 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2188                 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2189
2190                 /*
2191                  * L1 may access L2's PDPTRs, so save them in order to
2192                  * construct vmcs12.
2193                  */
2194                 if (enable_ept) {
2195                         vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2196                         vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2197                         vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2198                         vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2199                 }
2200         }
2201
2202         if (nested_cpu_has_xsaves(vmcs12))
2203                 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2204
2205         /*
2206          * Whether page-faults are trapped is determined by a combination of
2207          * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2208          * If enable_ept, L0 doesn't care about page faults and we should
2209          * set all of these to L1's desires. However, if !enable_ept, L0 does
2210          * care about (at least some) page faults, and because it is not easy
2211          * (if at all possible?) to merge L0 and L1's desires, we simply ask
2212          * to exit on each and every L2 page fault. This is done by setting
2213          * MASK=MATCH=0 and (see below) EB.PF=1.
2214          * Note that below we don't need special code to set EB.PF beyond the
2215          * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2216          * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2217          * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2218          */
2219         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2220                 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2221         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2222                 enable_ept ? vmcs12->page_fault_error_code_match : 0);
2223
2224         if (cpu_has_vmx_apicv()) {
2225                 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2226                 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2227                 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2228                 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2229         }
2230
2231         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2232         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2233
2234         set_cr4_guest_host_mask(vmx);
2235
2236         if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2237             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2238                 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2239 }
2240
2241 /*
2242  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2243  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2244  * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2245  * guest in a way that will both be appropriate to L1's requests, and our
2246  * needs. In addition to modifying the active vmcs (which is vmcs02), this
2247  * function also has other necessary side effects, such as setting various
2248  * vcpu->arch fields.
2249  * Returns 0 on success, -EINVAL on failure. The invalid-state exit
2250  * qualification code is assigned to entry_failure_code on failure.
2251  */
2252 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2253                           u32 *entry_failure_code)
2254 {
2255         struct vcpu_vmx *vmx = to_vmx(vcpu);
2256
2257         if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
2258                 prepare_vmcs02_rare(vmx, vmcs12);
2259                 vmx->nested.dirty_vmcs12 = false;
2260         }
2261
2262         if (vmx->nested.nested_run_pending &&
2263             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2264                 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2265                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2266         } else {
2267                 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2268                 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2269         }
2270         if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2271             !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2272                 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2273         vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2274
2275         /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2276          * bitwise-or of what L1 wants to trap for L2, and what we want to
2277          * trap. Note that CR0.TS also needs updating - we do this later.
2278          */
2279         update_exception_bitmap(vcpu);
2280         vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2281         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2282
2283         if (vmx->nested.nested_run_pending &&
2284             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2285                 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2286                 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2287         } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2288                 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2289         }
2290
2291         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2292
2293         if (kvm_has_tsc_control)
2294                 decache_tsc_multiplier(vmx);
2295
2296         if (enable_vpid) {
2297                 /*
2298                  * There is no direct mapping between vpid02 and vpid12: vpid02
2299                  * is per-vCPU and owned by L0, and is reused even when the value
2300                  * of vpid12 changes (with a single INVVPID during nested vmentry).
2301                  * vpid12 is allocated by L1 for L2, so it does not influence the
2302                  * global bitmap (used for vpid01 and vpid02 allocation) even if
2303                  * L1 spawns a lot of nested vCPUs.
2304                  */
2305                 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2306                         if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2307                                 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
2308                                 __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
2309                         }
2310                 } else {
2311                         /*
2312                          * If L1 uses EPT, then L0 needs to execute INVEPT on
2313                          * EPTP02 instead of EPTP01. Therefore, delay TLB
2314                          * flush until vmcs02->eptp is fully updated by
2315                          * KVM_REQ_LOAD_CR3. Note that this assumes
2316                          * KVM_REQ_TLB_FLUSH is evaluated after
2317                          * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
2318                          */
2319                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2320                 }
2321         }
2322
2323         if (nested_cpu_has_ept(vmcs12))
2324                 nested_ept_init_mmu_context(vcpu);
2325         else if (nested_cpu_has2(vmcs12,
2326                                  SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2327                 vmx_flush_tlb(vcpu, true);
2328
2329         /*
2330          * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2331          * bits which we consider mandatorily enabled.
2332          * The CR0_READ_SHADOW is what L2 should have expected to read given
2333          * the specifications by L1; it's not enough to take
2334          * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2335          * contain more bits than L1 expected.
2336          */
2337         vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2338         vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2339
2340         vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2341         vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2342
2343         vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2344         /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2345         vmx_set_efer(vcpu, vcpu->arch.efer);
2346
2347         /*
2348          * Guest state is invalid and unrestricted guest is disabled,
2349          * which means L1 attempted VMEntry to L2 with invalid state.
2350          * Fail the VMEntry.
2351          */
2352         if (vmx->emulation_required) {
2353                 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2354                 return -EINVAL;
2355         }
2356
2357         /* Load guest CR3, shadowed by either EPT or shadow page tables. */
2358         if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2359                                 entry_failure_code))
2360                 return -EINVAL;
2361
2362         if (!enable_ept)
2363                 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2364
2365         kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2366         kvm_rip_write(vcpu, vmcs12->guest_rip);
2367         return 0;
2368 }
2369
2370 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2371 {
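             /*
              * Per the SDM: "virtual NMIs" can be 1 only if "NMI exiting" is
              * 1, and "NMI-window exiting" can be 1 only if "virtual NMIs"
              * is 1.
              */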
2372         if (!nested_cpu_has_nmi_exiting(vmcs12) &&
2373             nested_cpu_has_virtual_nmis(vmcs12))
2374                 return -EINVAL;
2375
2376         if (!nested_cpu_has_virtual_nmis(vmcs12) &&
2377             nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
2378                 return -EINVAL;
2379
2380         return 0;
2381 }
2382
2383 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2384 {
2385         struct vcpu_vmx *vmx = to_vmx(vcpu);
2386         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2387
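             /*
              * EPTP format: bits 2:0 memory type, bits 5:3 page-walk length
              * minus 1, bit 6 enables accessed/dirty flags, bits 11:7 are
              * reserved, and the upper bits hold the EPT PML4 table address.
              */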
2388         /* Check for memory type validity */
2389         switch (address & VMX_EPTP_MT_MASK) {
2390         case VMX_EPTP_MT_UC:
2391                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
2392                         return false;
2393                 break;
2394         case VMX_EPTP_MT_WB:
2395                 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
2396                         return false;
2397                 break;
2398         default:
2399                 return false;
2400         }
2401
2402         /* Only a 4-level page-walk length is valid. */
2403         if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
2404                 return false;
2405
2406         /* Reserved bits should not be set */
2407         if (address >> maxphyaddr || ((address >> 7) & 0x1f))
2408                 return false;
2409
2410         /* AD, if set, should be supported */
2411         if (address & VMX_EPTP_AD_ENABLE_BIT) {
2412                 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
2413                         return false;
2414         }
2415
2416         return true;
2417 }
2418
2419 /*
2420  * Checks related to VM-Execution Control Fields
2421  */
2422 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2423                                               struct vmcs12 *vmcs12)
2424 {
2425         struct vcpu_vmx *vmx = to_vmx(vcpu);
2426
2427         if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2428                                 vmx->nested.msrs.pinbased_ctls_low,
2429                                 vmx->nested.msrs.pinbased_ctls_high) ||
2430             !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2431                                 vmx->nested.msrs.procbased_ctls_low,
2432                                 vmx->nested.msrs.procbased_ctls_high))
2433                 return -EINVAL;
2434
2435         if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2436             !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2437                                  vmx->nested.msrs.secondary_ctls_low,
2438                                  vmx->nested.msrs.secondary_ctls_high))
2439                 return -EINVAL;
2440
2441         if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2442             nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2443             nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2444             nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2445             nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2446             nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2447             nested_vmx_check_nmi_controls(vmcs12) ||
2448             nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2449             nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2450             nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2451             nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2452             (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2453                 return -EINVAL;
2454
2455         if (!nested_cpu_has_preemption_timer(vmcs12) &&
2456             nested_cpu_has_save_preemption_timer(vmcs12))
2457                 return -EINVAL;
2458
2459         if (nested_cpu_has_ept(vmcs12) &&
2460             !valid_ept_address(vcpu, vmcs12->ept_pointer))
2461                 return -EINVAL;
2462
2463         if (nested_cpu_has_vmfunc(vmcs12)) {
2464                 if (vmcs12->vm_function_control &
2465                     ~vmx->nested.msrs.vmfunc_controls)
2466                         return -EINVAL;
2467
2468                 if (nested_cpu_has_eptp_switching(vmcs12)) {
2469                         if (!nested_cpu_has_ept(vmcs12) ||
2470                             !page_address_valid(vcpu, vmcs12->eptp_list_address))
2471                                 return -EINVAL;
2472                 }
2473         }
2474
2475         return 0;
2476 }
2477
2478 /*
2479  * Checks related to VM-Exit Control Fields
2480  */
2481 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2482                                          struct vmcs12 *vmcs12)
2483 {
2484         struct vcpu_vmx *vmx = to_vmx(vcpu);
2485
2486         if (!vmx_control_verify(vmcs12->vm_exit_controls,
2487                                 vmx->nested.msrs.exit_ctls_low,
2488                                 vmx->nested.msrs.exit_ctls_high) ||
2489             nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
2490                 return -EINVAL;
2491
2492         return 0;
2493 }
2494
2495 /*
2496  * Checks related to VM-Entry Control Fields
2497  */
2498 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2499                                           struct vmcs12 *vmcs12)
2500 {
2501         struct vcpu_vmx *vmx = to_vmx(vcpu);
2502
2503         if (!vmx_control_verify(vmcs12->vm_entry_controls,
2504                                 vmx->nested.msrs.entry_ctls_low,
2505                                 vmx->nested.msrs.entry_ctls_high))
2506                 return -EINVAL;
2507
2508         /*
2509          * From the Intel SDM, volume 3:
2510          * Fields relevant to VM-entry event injection must be set properly.
2511          * These fields are the VM-entry interruption-information field, the
2512          * VM-entry exception error code, and the VM-entry instruction length.
2513          */
2514         if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2515                 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2516                 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2517                 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2518                 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2519                 bool should_have_error_code;
2520                 bool urg = nested_cpu_has2(vmcs12,
2521                                            SECONDARY_EXEC_UNRESTRICTED_GUEST);
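                     /*
                      * Without unrestricted guest, VM-entry requires CR0.PE=1,
                      * so the guest is necessarily in protected mode.
                      */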
2522                 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2523
2524                 /* VM-entry interruption-info field: interruption type */
2525                 if (intr_type == INTR_TYPE_RESERVED ||
2526                     (intr_type == INTR_TYPE_OTHER_EVENT &&
2527                      !nested_cpu_supports_monitor_trap_flag(vcpu)))
2528                         return -EINVAL;
2529
2530                 /* VM-entry interruption-info field: vector */
2531                 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2532                     (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2533                     (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2534                         return -EINVAL;
2535
2536                 /* VM-entry interruption-info field: deliver error code */
2537                 should_have_error_code =
2538                         intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2539                         x86_exception_has_error_code(vector);
2540                 if (has_error_code != should_have_error_code)
2541                         return -EINVAL;
2542
2543                 /* VM-entry exception error code */
2544                 if (has_error_code &&
2545                     vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
2546                         return -EINVAL;
2547
2548                 /* VM-entry interruption-info field: reserved bits */
2549                 if (intr_info & INTR_INFO_RESVD_BITS_MASK)
2550                         return -EINVAL;
2551
2552                 /* VM-entry instruction length */
2553                 switch (intr_type) {
2554                 case INTR_TYPE_SOFT_EXCEPTION:
2555                 case INTR_TYPE_SOFT_INTR:
2556                 case INTR_TYPE_PRIV_SW_EXCEPTION:
2557                         if ((vmcs12->vm_entry_instruction_len > 15) ||
2558                             (vmcs12->vm_entry_instruction_len == 0 &&
2559                              !nested_cpu_has_zero_length_injection(vcpu)))
2560                                 return -EINVAL;
2561                 }
2562         }
2563
2564         if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2565                 return -EINVAL;
2566
2567         return 0;
2568 }
2569
2570 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2571                                      struct vmcs12 *vmcs12)
2572 {
2573         if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2574             nested_check_vm_exit_controls(vcpu, vmcs12) ||
2575             nested_check_vm_entry_controls(vcpu, vmcs12))
2576                 return -EINVAL;
2577
2578         return 0;
2579 }
2580
2581 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2582                                        struct vmcs12 *vmcs12)
2583 {
2584         bool ia32e;
2585
2586         if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2587             !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2588             !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2589                 return -EINVAL;
2590
2591         if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
2592             is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
2593                 return -EINVAL;
2594
2595         if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2596             !kvm_pat_valid(vmcs12->host_ia32_pat))
2597                 return -EINVAL;
2598
2599         /*
2600          * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2601          * IA32_EFER MSR must be 0 in the field for that register. In addition,
2602          * the values of the LMA and LME bits in the field must each be that of
2603          * the host address-space size VM-exit control.
2604          */
2605         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2606                 ia32e = (vmcs12->vm_exit_controls &
2607                          VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
2608                 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2609                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2610                     ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
2611                         return -EINVAL;
2612         }
2613
2614         return 0;
2615 }
2616
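/*
 * Note: validate the VMCS link pointer.  A value of -1 means "not used" and
 * is always legal; otherwise the referenced page must be a mappable guest
 * page whose VMCS revision ID matches what KVM advertises and whose
 * shadow-VMCS indicator agrees with vmcs12's "VMCS shadowing" control.  The
 * caller reports a failure here as ENTRY_FAIL_VMCS_LINK_PTR.
 */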
2617 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2618                                           struct vmcs12 *vmcs12)
2619 {
2620         int r = 0;
2621         struct vmcs12 *shadow;
2622         struct kvm_host_map map;
2623
2624         if (vmcs12->vmcs_link_pointer == -1ull)
2625                 return 0;
2626
2627         if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
2628                 return -EINVAL;
2629
2630         if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
2631                 return -EINVAL;
2632
2633         shadow = map.hva;
2634
2635         if (shadow->hdr.revision_id != VMCS12_REVISION ||
2636             shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
2637                 r = -EINVAL;
2638
2639         kvm_vcpu_unmap(vcpu, &map, false);
2640         return r;
2641 }
2642
2643 /*
2644  * Checks related to Guest Non-register State
2645  */
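/*
 * Note: only the "active" and "HLT" activity states are supported for L2;
 * entries requesting the shutdown or wait-for-SIPI states are rejected.
 */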
2646 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2647 {
2648         if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2649             vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2650                 return -EINVAL;
2651
2652         return 0;
2653 }
2654
2655 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2656                                         struct vmcs12 *vmcs12,
2657                                         u32 *exit_qual)
2658 {
2659         bool ia32e;
2660
2661         *exit_qual = ENTRY_FAIL_DEFAULT;
2662
2663         if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
2664             !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
2665                 return -EINVAL;
2666
2667         if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
2668             !kvm_pat_valid(vmcs12->guest_ia32_pat))
2669                 return -EINVAL;
2670
2671         if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
2672                 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
2673                 return -EINVAL;
2674         }
2675
2676         /*
2677          * If the load IA32_EFER VM-entry control is 1, the following checks
2678          * are performed on the field for the IA32_EFER MSR:
2679          * - Bits reserved in the IA32_EFER MSR must be 0.
2680          * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2681          *   the IA-32e mode guest VM-exit control. It must also be identical
2682          *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
2683          *   CR0.PG) is 1.
2684          */
2685         if (to_vmx(vcpu)->nested.nested_run_pending &&
2686             (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2687                 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2688                 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2689                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2690                     ((vmcs12->guest_cr0 & X86_CR0_PG) &&
2691                      ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
2692                         return -EINVAL;
2693         }
2694
2695         if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2696             (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
2697              (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
2698                 return -EINVAL;
2699
2700         if (nested_check_guest_non_reg_state(vmcs12))
2701                 return -EINVAL;
2702
2703         return 0;
2704 }
2705
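/*
 * Note: optional (nested_early_check) dry run of VMEnter on vmcs02.  Load the
 * vmcs02 host state, deliberately corrupt GUEST_RFLAGS so that a successful
 * entry is impossible, and let hardware vet the control and host-state
 * fields.  A VM-Fail here is reported to the caller as an
 * invalid-control-field error; the expected outcome is an immediate
 * consistency-check VMExit, which is simply discarded.
 */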
2706 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2707 {
2708         struct vcpu_vmx *vmx = to_vmx(vcpu);
2709         unsigned long cr3, cr4;
2710         bool vm_fail;
2711
2712         if (!nested_early_check)
2713                 return 0;
2714
2715         if (vmx->msr_autoload.host.nr)
2716                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2717         if (vmx->msr_autoload.guest.nr)
2718                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2719
2720         preempt_disable();
2721
2722         vmx_prepare_switch_to_guest(vcpu);
2723
2724         /*
2725          * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2726          * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
2727          * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2728          * there is no need to preserve other bits or save/restore the field.
2729          */
2730         vmcs_writel(GUEST_RFLAGS, 0);
2731
2732         cr3 = __get_current_cr3_fast();
2733         if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2734                 vmcs_writel(HOST_CR3, cr3);
2735                 vmx->loaded_vmcs->host_state.cr3 = cr3;
2736         }
2737
2738         cr4 = cr4_read_shadow();
2739         if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2740                 vmcs_writel(HOST_CR4, cr4);
2741                 vmx->loaded_vmcs->host_state.cr4 = cr4;
2742         }
2743
2744         asm(
2745                 "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
2746                 "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2747                 "je 1f \n\t"
2748                 __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
2749                 "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
2750                 "1: \n\t"
2751                 "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
2752
2753                 /* Check if vmlaunch or vmresume is needed */
2754                 "cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
2755
2756                 /*
2757                  * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
2758                  * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
2759                  * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
2760                  * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
2761                  */
2762                 "call vmx_vmenter\n\t"
2763
2764                 CC_SET(be)
2765               : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
2766               : [HOST_RSP]"r"((unsigned long)HOST_RSP),
2767                 [loaded_vmcs]"r"(vmx->loaded_vmcs),
2768                 [launched]"i"(offsetof(struct loaded_vmcs, launched)),
2769                 [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
2770                 [wordsize]"i"(sizeof(ulong))
2771               : "memory"
2772         );
2773
2774         if (vmx->msr_autoload.host.nr)
2775                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2776         if (vmx->msr_autoload.guest.nr)
2777                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2778
2779         if (vm_fail) {
2780                 preempt_enable();
2781                 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
2782                              VMXERR_ENTRY_INVALID_CONTROL_FIELD);
2783                 return 1;
2784         }
2785
2786         /*
2787          * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
2788          */
2789         local_irq_enable();
2790         if (hw_breakpoint_active())
2791                 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2792         preempt_enable();
2793
2794         /*
2795          * A non-failing VMEntry means we somehow entered guest mode with
2796          * an illegal RIP, and that's just the tip of the iceberg.  There
2797          * is no telling what memory has been modified or what state has
2798          * been exposed to unknown code.  Hitting this all but guarantees
2799          * a (very critical) hardware issue.
2800          */
2801         WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
2802                 VMX_EXIT_REASONS_FAILED_VMENTRY));
2803
2804         return 0;
2805 }
2806
2807 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2808                                                  struct vmcs12 *vmcs12);
2809
2810 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2811 {
2812         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2813         struct vcpu_vmx *vmx = to_vmx(vcpu);
2814         struct kvm_host_map *map;
2815         struct page *page;
2816         u64 hpa;
2817
2818         if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2819                 /*
2820                  * Translate L1 physical address to host physical
2821                  * address for vmcs02. Keep the page pinned, so this
2822                  * physical address remains valid. We keep a reference
2823                  * to it so we can release it later.
2824                  */
2825                 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2826                         kvm_release_page_dirty(vmx->nested.apic_access_page);
2827                         vmx->nested.apic_access_page = NULL;
2828                 }
2829                 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2830                 /*
2831                  * If translation failed, no matter: This feature asks
2832                  * to exit when accessing the given address, and if it
2833                  * can never be accessed, this feature won't do
2834                  * anything anyway.
2835                  */
2836                 if (!is_error_page(page)) {
2837                         vmx->nested.apic_access_page = page;
2838                         hpa = page_to_phys(vmx->nested.apic_access_page);
2839                         vmcs_write64(APIC_ACCESS_ADDR, hpa);
2840                 } else {
2841                         vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
2842                                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
2843                 }
2844         }
2845
2846         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2847                 map = &vmx->nested.virtual_apic_map;
2848
2849                 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
2850                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
2851                 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
2852                            nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
2853                            !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2854                         /*
2855                          * The processor will never use the TPR shadow, simply
2856                          * clear the bit from the execution control.  Such a
2857                          * configuration is useless, but it happens in tests.
2858                          * For any other configuration, failing the vm entry is
2859                          * _not_ what the processor does but it's basically the
2860                          * only possibility we have.
2861                          */
2862                         vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2863                                         CPU_BASED_TPR_SHADOW);
2864                 } else {
2865                         /*
2866                          * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
2867                          * force VM-Entry to fail.
2868                          */
2869                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2870                 }
2871         }
2872
2873         if (nested_cpu_has_posted_intr(vmcs12)) {
2874                 map = &vmx->nested.pi_desc_map;
2875
2876                 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
2877                         vmx->nested.pi_desc =
2878                                 (struct pi_desc *)(((void *)map->hva) +
2879                                 offset_in_page(vmcs12->posted_intr_desc_addr));
2880                         vmcs_write64(POSTED_INTR_DESC_ADDR,
2881                                      pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
2882                 }
2883         }
2884         if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2885                 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2886                               CPU_BASED_USE_MSR_BITMAPS);
2887         else
2888                 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
2889                                 CPU_BASED_USE_MSR_BITMAPS);
2890 }
2891
2892 /*
2893  * Intel's VMX Instruction Reference specifies a common set of prerequisites
2894  * for running VMX instructions (except VMXON, whose prerequisites are
2895  * slightly different). It also specifies what exception to inject otherwise.
2896  * Note that many of these exceptions have priority over VM exits, so they
2897  * don't have to be checked again here.
2898  */
2899 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2900 {
2901         if (!to_vmx(vcpu)->nested.vmxon) {
2902                 kvm_queue_exception(vcpu, UD_VECTOR);
2903                 return 0;
2904         }
2905
2906         if (vmx_get_cpl(vcpu)) {
2907                 kvm_inject_gp(vcpu, 0);
2908                 return 0;
2909         }
2910
2911         return 1;
2912 }
2913
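/*
 * Note: with APICv, a pending virtual interrupt is deliverable only when the
 * priority class (bits 7:4) of the Requesting Virtual Interrupt is strictly
 * higher than that of the virtual PPR, mirroring the SDM's evaluation of
 * pending virtual interrupts.
 */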
2914 static bool vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2915 {
2916         u8 rvi = vmx_get_rvi();
2917         u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2918
2919         return ((rvi & 0xf0) > (vppr & 0xf0));
2920 }
2921
2922 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2923                                    struct vmcs12 *vmcs12);
2924
2925 /*
2926  * If from_vmentry is false, this is being called from state restore (either RSM
2927  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
2928  *
2929  * Returns:
2930  *   0 - success, i.e. proceed with actual VMEnter
2931  *   1 - consistency check VMExit
2932  *  -1 - consistency check VMFail
2933  */
2934 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2935 {
2936         struct vcpu_vmx *vmx = to_vmx(vcpu);
2937         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2938         bool evaluate_pending_interrupts;
2939         u32 exit_reason = EXIT_REASON_INVALID_STATE;
2940         u32 exit_qual;
2941
2942         evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
2943                 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
2944         if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
2945                 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
2946
2947         if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
2948                 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
2949         if (kvm_mpx_supported() &&
2950                 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2951                 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
2952
2953         vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
2954
2955         prepare_vmcs02_early(vmx, vmcs12);
2956
2957         if (from_vmentry) {
2958                 nested_get_vmcs12_pages(vcpu);
2959
2960                 if (nested_vmx_check_vmentry_hw(vcpu)) {
2961                         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
2962                         return -1;
2963                 }
2964
2965                 if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
2966                         goto vmentry_fail_vmexit;
2967         }
2968
2969         enter_guest_mode(vcpu);
2970         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2971                 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
2972
2973         if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
2974                 goto vmentry_fail_vmexit_guest_mode;
2975
2976         if (from_vmentry) {
2977                 exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
2978                 exit_qual = nested_vmx_load_msr(vcpu,
2979                                                 vmcs12->vm_entry_msr_load_addr,
2980                                                 vmcs12->vm_entry_msr_load_count);
2981                 if (exit_qual)
2982                         goto vmentry_fail_vmexit_guest_mode;
2983         } else {
2984                 /*
2985                  * The MMU is not initialized to point at the right entities yet and
2986                  * "get pages" would need to read data from the guest (i.e. we will
2987                  * need to perform gpa to hpa translation). Request a call
2988                  * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
2989                  * have already been set at vmentry time and should not be reset.
2990                  */
2991                 kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
2992         }
2993
2994         /*
2995          * If L1 had a pending IRQ/NMI until it executed
2996          * VMLAUNCH/VMRESUME which wasn't delivered because it was
2997          * disallowed (e.g. interrupts disabled), L0 needs to
2998          * evaluate if this pending event should cause an exit from L2
2999          * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3000          * intercept EXTERNAL_INTERRUPT).
3001          *
3002          * Usually this would be handled by the processor noticing an
3003          * IRQ/NMI window request, or checking RVI during evaluation of
3004          * pending virtual interrupts.  However, this setting was done
3005          * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3006          * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3007          */
3008         if (unlikely(evaluate_pending_interrupts))
3009                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3010
3011         /*
3012          * Do not start the preemption timer hrtimer until after we know
3013          * we are successful, so that only nested_vmx_vmexit needs to cancel
3014          * the timer.
3015          */
3016         vmx->nested.preemption_timer_expired = false;
3017         if (nested_cpu_has_preemption_timer(vmcs12))
3018                 vmx_start_preemption_timer(vcpu);
3019
3020         /*
3021          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3022          * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3023          * returned as far as L1 is concerned. It will only return (and set
3024          * the success flag) when L2 exits (see nested_vmx_vmexit()).
3025          */
3026         return 0;
3027
3028         /*
3029          * A failed consistency check that leads to a VMExit during L1's
3030          * VMEnter to L2 is a variation of a normal VMexit, as explained in
3031          * 26.7 "VM-entry failures during or after loading guest state".
3032          */
3033 vmentry_fail_vmexit_guest_mode:
3034         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3035                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3036         leave_guest_mode(vcpu);
3037
3038 vmentry_fail_vmexit:
3039         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3040
3041         if (!from_vmentry)
3042                 return 1;
3043
3044         load_vmcs12_host_state(vcpu, vmcs12);
3045         vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3046         vmcs12->exit_qualification = exit_qual;
3047         if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3048                 vmx->nested.need_vmcs12_to_shadow_sync = true;
3049         return 1;
3050 }
3051
3052 /*
3053  * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3054  * for running an L2 nested guest.
3055  */
3056 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3057 {
3058         struct vmcs12 *vmcs12;
3059         struct vcpu_vmx *vmx = to_vmx(vcpu);
3060         u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3061         int ret;
3062
3063         if (!nested_vmx_check_permission(vcpu))
3064                 return 1;
3065
3066         if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
3067                 return 1;
3068
3069         if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3070                 return nested_vmx_failInvalid(vcpu);
3071
3072         vmcs12 = get_vmcs12(vcpu);
3073
3074         /*
3075          * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3076          * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3077          * rather than RFLAGS.ZF, and no error number is stored to the
3078          * VM-instruction error field.
3079          */
3080         if (vmcs12->hdr.shadow_vmcs)
3081                 return nested_vmx_failInvalid(vcpu);
3082
3083         if (vmx->nested.hv_evmcs) {
3084                 copy_enlightened_to_vmcs12(vmx);
3085                 /* Enlightened VMCS doesn't have launch state */
3086                 vmcs12->launch_state = !launch;
3087         } else if (enable_shadow_vmcs) {
3088                 copy_shadow_to_vmcs12(vmx);
3089         }
3090
3091         /*
3092          * The nested entry process starts with enforcing various prerequisites
3093  * on vmcs12 as required by the Intel SDM, and acting appropriately when
3094          * they fail: As the SDM explains, some conditions should cause the
3095          * instruction to fail, while others will cause the instruction to seem
3096          * to succeed, but return an EXIT_REASON_INVALID_STATE.
3097          * To speed up the normal (success) code path, we should avoid checking
3098          * for misconfigurations which will anyway be caught by the processor
3099          * when using the merged vmcs02.
3100          */
3101         if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3102                 return nested_vmx_failValid(vcpu,
3103                         VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3104
3105         if (vmcs12->launch_state == launch)
3106                 return nested_vmx_failValid(vcpu,
3107                         launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3108                                : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3109
3110         if (nested_vmx_check_controls(vcpu, vmcs12))
3111                 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3112
3113         if (nested_vmx_check_host_state(vcpu, vmcs12))
3114                 return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3115
3116         /*
3117          * We're finally done with prerequisite checking, and can start with
3118          * the nested entry.
3119          */
3120         vmx->nested.nested_run_pending = 1;
3121         ret = nested_vmx_enter_non_root_mode(vcpu, true);
3122         vmx->nested.nested_run_pending = !ret;
3123         if (ret > 0)
3124                 return 1;
3125         else if (ret)
3126                 return nested_vmx_failValid(vcpu,
3127                         VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3128
3129         /* Hide L1D cache contents from the nested guest.  */
3130         vmx->vcpu.arch.l1tf_flush_l1d = true;
3131
3132         /*
3133          * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3134          * also be used as part of restoring nVMX state for
3135          * snapshot restore (migration).
3136          *
3137          * In this flow, it is assumed that the vmcs12 cache was
3138          * transferred as part of the captured nVMX state and should
3139          * therefore not be read from guest memory (which may not
3140          * exist on destination host yet).
3141          */
3142         nested_cache_shadow_vmcs12(vcpu, vmcs12);
3143
3144         /*
3145          * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3146          * awakened by event injection or by an NMI-window VM-exit or
3147          * by an interrupt-window VM-exit, halt the vcpu.
3148          */
3149         if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3150             !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3151             !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
3152             !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
3153               (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3154                 vmx->nested.nested_run_pending = 0;
3155                 return kvm_vcpu_halt(vcpu);
3156         }
3157         return 1;
3158 }
3159
3160 /*
3161  * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3162  * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3163  * This function returns the new value we should put in vmcs12.guest_cr0.
3164  * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3165  *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3166  *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3167  *     didn't trap the bit, because if L1 did, so would L0).
3168  *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3169  *     been modified by L2, and L1 knows it. So just leave the old value of
3170  *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3171  *     isn't relevant, because if L0 traps this bit it can set it to anything.
3172  *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3173  *     changed these bits, and therefore they need to be updated, but L0
3174  *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3175  *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3176  */
3177 static inline unsigned long
3178 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3179 {
3180         return
3181         /*1*/   (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3182         /*2*/   (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3183         /*3*/   (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3184                         vcpu->arch.cr0_guest_owned_bits));
3185 }
3186
3187 static inline unsigned long
3188 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3189 {
3190         return
3191         /*1*/   (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3192         /*2*/   (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3193         /*3*/   (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3194                         vcpu->arch.cr4_guest_owned_bits));
3195 }
3196
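/*
 * Note: re-encode an event KVM had queued for L2 (exception, NMI or
 * interrupt) into vmcs12's IDT-vectoring info field, so that L1 sees it the
 * way hardware reports an event whose delivery was interrupted by the
 * VM-exit and can re-inject it.
 */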
3197 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3198                                       struct vmcs12 *vmcs12)
3199 {
3200         u32 idt_vectoring;
3201         unsigned int nr;
3202
3203         if (vcpu->arch.exception.injected) {
3204                 nr = vcpu->arch.exception.nr;
3205                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3206
3207                 if (kvm_exception_is_soft(nr)) {
3208                         vmcs12->vm_exit_instruction_len =
3209                                 vcpu->arch.event_exit_inst_len;
3210                         idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3211                 } else
3212                         idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3213
3214                 if (vcpu->arch.exception.has_error_code) {
3215                         idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3216                         vmcs12->idt_vectoring_error_code =
3217                                 vcpu->arch.exception.error_code;
3218                 }
3219
3220                 vmcs12->idt_vectoring_info_field = idt_vectoring;
3221         } else if (vcpu->arch.nmi_injected) {
3222                 vmcs12->idt_vectoring_info_field =
3223                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3224         } else if (vcpu->arch.interrupt.injected) {
3225                 nr = vcpu->arch.interrupt.nr;
3226                 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3227
3228                 if (vcpu->arch.interrupt.soft) {
3229                         idt_vectoring |= INTR_TYPE_SOFT_INTR;
3230                         vmcs12->vm_entry_instruction_len =
3231                                 vcpu->arch.event_exit_inst_len;
3232                 } else
3233                         idt_vectoring |= INTR_TYPE_EXT_INTR;
3234
3235                 vmcs12->idt_vectoring_info_field = idt_vectoring;
3236         }
3237 }
3238
3239
3240 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3241 {
3242         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3243         gfn_t gfn;
3244
3245         /*
3246          * Don't need to mark the APIC access page dirty; it is never
3247          * written to by the CPU during APIC virtualization.
3248          */
3249
3250         if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3251                 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3252                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3253         }
3254
3255         if (nested_cpu_has_posted_intr(vmcs12)) {
3256                 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3257                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3258         }
3259 }
3260
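/*
 * Note: emulate posted-interrupt processing on behalf of L2: clear the
 * descriptor's outstanding-notification bit, pick the highest vector set in
 * the 256-bit PIR, merge the PIR into the virtual-APIC page's IRR and bump
 * RVI (the low byte of GUEST_INTR_STATUS) if the new vector outranks the
 * current one.
 */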
3261 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3262 {
3263         struct vcpu_vmx *vmx = to_vmx(vcpu);
3264         int max_irr;
3265         void *vapic_page;
3266         u16 status;
3267
3268         if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3269                 return;
3270
3271         vmx->nested.pi_pending = false;
3272         if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3273                 return;
3274
3275         max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3276         if (max_irr != 256) {
3277                 vapic_page = vmx->nested.virtual_apic_map.hva;
3278                 if (!vapic_page)
3279                         return;
3280
3281                 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3282                         vapic_page, &max_irr);
3283                 status = vmcs_read16(GUEST_INTR_STATUS);
3284                 if ((u8)max_irr > ((u8)status & 0xff)) {
3285                         status &= ~0xff;
3286                         status |= (u8)max_irr;
3287                         vmcs_write16(GUEST_INTR_STATUS, status);
3288                 }
3289         }
3290
3291         nested_mark_vmcs12_pages_dirty(vcpu);
3292 }
3293
3294 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3295                                                unsigned long exit_qual)
3296 {
3297         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3298         unsigned int nr = vcpu->arch.exception.nr;
3299         u32 intr_info = nr | INTR_INFO_VALID_MASK;
3300
3301         if (vcpu->arch.exception.has_error_code) {
3302                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3303                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3304         }
3305
3306         if (kvm_exception_is_soft(nr))
3307                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3308         else
3309                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3310
3311         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3312             vmx_get_nmi_mask(vcpu))
3313                 intr_info |= INTR_INFO_UNBLOCK_NMI;
3314
3315         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3316 }
3317
3318 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3319 {
3320         struct vcpu_vmx *vmx = to_vmx(vcpu);
3321         unsigned long exit_qual;
3322         bool block_nested_events =
3323             vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3324
3325         if (vcpu->arch.exception.pending &&
3326                 nested_vmx_check_exception(vcpu, &exit_qual)) {
3327                 if (block_nested_events)
3328                         return -EBUSY;
3329                 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3330                 return 0;
3331         }
3332
3333         if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3334             vmx->nested.preemption_timer_expired) {
3335                 if (block_nested_events)
3336                         return -EBUSY;
3337                 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3338                 return 0;
3339         }
3340
3341         if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
3342                 if (block_nested_events)
3343                         return -EBUSY;
3344                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3345                                   NMI_VECTOR | INTR_TYPE_NMI_INTR |
3346                                   INTR_INFO_VALID_MASK, 0);
3347                 /*
3348                  * The NMI-triggered VM exit counts as injection:
3349                  * clear this one and block further NMIs.
3350                  */
3351                 vcpu->arch.nmi_pending = 0;
3352                 vmx_set_nmi_mask(vcpu, true);
3353                 return 0;
3354         }
3355
3356         if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3357             nested_exit_on_intr(vcpu)) {
3358                 if (block_nested_events)
3359                         return -EBUSY;
3360                 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3361                 return 0;
3362         }
3363
3364         vmx_complete_nested_posted_interrupt(vcpu);
3365         return 0;
3366 }
3367
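/*
 * Note: convert the hrtimer's remaining time back into VMX-preemption-timer
 * units: ns * virtual_tsc_khz / 10^6 yields TSC ticks, and the emulated
 * timer ticks once every 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC
 * cycles, hence the final shift.
 */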
3368 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3369 {
3370         ktime_t remaining =
3371                 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3372         u64 value;
3373
3374         if (ktime_to_ns(remaining) <= 0)
3375                 return 0;
3376
3377         value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3378         do_div(value, 1000000);
3379         return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3380 }
3381
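/*
 * Note: the guest-state fields listed here are exactly those whose
 * propagation from vmcs02 to vmcs12 is deferred to
 * sync_vmcs02_to_vmcs12_rare(), presumably so callers can tell whether a
 * vmcs12 access requires the deferred sync first.
 */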
3382 static bool is_vmcs12_ext_field(unsigned long field)
3383 {
3384         switch (field) {
3385         case GUEST_ES_SELECTOR:
3386         case GUEST_CS_SELECTOR:
3387         case GUEST_SS_SELECTOR:
3388         case GUEST_DS_SELECTOR:
3389         case GUEST_FS_SELECTOR:
3390         case GUEST_GS_SELECTOR:
3391         case GUEST_LDTR_SELECTOR:
3392         case GUEST_TR_SELECTOR:
3393         case GUEST_ES_LIMIT:
3394         case GUEST_CS_LIMIT:
3395         case GUEST_SS_LIMIT:
3396         case GUEST_DS_LIMIT:
3397         case GUEST_FS_LIMIT:
3398         case GUEST_GS_LIMIT:
3399         case GUEST_LDTR_LIMIT:
3400         case GUEST_TR_LIMIT:
3401         case GUEST_GDTR_LIMIT:
3402         case GUEST_IDTR_LIMIT:
3403         case GUEST_ES_AR_BYTES:
3404         case GUEST_DS_AR_BYTES:
3405         case GUEST_FS_AR_BYTES:
3406         case GUEST_GS_AR_BYTES:
3407         case GUEST_LDTR_AR_BYTES:
3408         case GUEST_TR_AR_BYTES:
3409         case GUEST_ES_BASE:
3410         case GUEST_CS_BASE:
3411         case GUEST_SS_BASE:
3412         case GUEST_DS_BASE:
3413         case GUEST_FS_BASE:
3414         case GUEST_GS_BASE:
3415         case GUEST_LDTR_BASE:
3416         case GUEST_TR_BASE:
3417         case GUEST_GDTR_BASE:
3418         case GUEST_IDTR_BASE:
3419         case GUEST_PENDING_DBG_EXCEPTIONS:
3420         case GUEST_BNDCFGS:
3421                 return true;
3422         default:
3423                 break;
3424         }
3425
3426         return false;
3427 }
3428
3429 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3430                                        struct vmcs12 *vmcs12)
3431 {
3432         struct vcpu_vmx *vmx = to_vmx(vcpu);
3433
3434         vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3435         vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3436         vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3437         vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3438         vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3439         vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3440         vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3441         vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3442         vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3443         vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3444         vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3445         vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3446         vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3447         vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3448         vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3449         vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3450         vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3451         vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3452         vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3453         vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3454         vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3455         vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3456         vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3457         vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3458         vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3459         vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3460         vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3461         vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3462         vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3463         vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3464         vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3465         vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3466         vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3467         vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3468         vmcs12->guest_pending_dbg_exceptions =
3469                 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3470         if (kvm_mpx_supported())
3471                 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3472
3473         vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
3474 }
3475
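/*
 * Note: vmcs_read*() always operates on the current VMCS, so the rare fields
 * can only be harvested while vmcs02 is loaded.  Temporarily make vmcs02 the
 * loaded VMCS on this CPU, read the fields, then switch back to vmcs01.
 */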
3476 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
3477                                        struct vmcs12 *vmcs12)
3478 {
3479         struct vcpu_vmx *vmx = to_vmx(vcpu);
3480         int cpu;
3481
3482         if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
3483                 return;
3484
3485
3486         WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
3487
3488         cpu = get_cpu();
3489         vmx->loaded_vmcs = &vmx->nested.vmcs02;
3490         vmx_vcpu_load(&vmx->vcpu, cpu);
3491
3492         sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3493
3494         vmx->loaded_vmcs = &vmx->vmcs01;
3495         vmx_vcpu_load(&vmx->vcpu, cpu);
3496         put_cpu();
3497 }
3498
3499 /*
3500  * Update the guest state fields of vmcs12 to reflect changes that
3501  * occurred while L2 was running. (The "IA-32e mode guest" bit of the
3502  * VM-entry controls is also updated, since this is really a guest
3503  * state bit.)
3504  */
3505 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3506 {
3507         struct vcpu_vmx *vmx = to_vmx(vcpu);
3508
3509         if (vmx->nested.hv_evmcs)
3510                 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
3511
3512         vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
3513
3514         vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3515         vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3516
3517         vmcs12->guest_rsp = kvm_rsp_read(vcpu);
3518         vmcs12->guest_rip = kvm_rip_read(vcpu);
3519         vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3520
3521         vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3522         vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3523
3524         vmcs12->guest_interruptibility_info =
3525                 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3526
3527         if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3528                 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
3529         else
3530                 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3531
3532         if (nested_cpu_has_preemption_timer(vmcs12) &&
3533             vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3534                         vmcs12->vmx_preemption_timer_value =
3535                                 vmx_get_preemption_timer_value(vcpu);
3536
3537         /*
3538          * In some cases (usually, nested EPT), L2 is allowed to change its
3539          * own CR3 without exiting. If it has changed it, we must keep it.
3540          * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3541          * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
3542          *
3543          * Additionally, restore L2's PDPTR to vmcs12.
3544          */
3545         if (enable_ept) {
3546                 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3547                 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3548                 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3549                 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
3550                 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
3551         }
3552
3553         vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3554
3555         if (nested_cpu_has_vid(vmcs12))
3556                 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3557
3558         vmcs12->vm_entry_controls =
3559                 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3560                 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3561
3562         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
3563                 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3564                 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3565         }
3566
3567         if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3568                 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3569         vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3570         vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3571         vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3572 }
3573
3574 /*
3575  * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3576  * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3577  * and this function updates it to reflect the changes to the guest state while
3578  * L2 was running (and perhaps made some exits which were handled directly by L0
3579  * without going back to L1), and to reflect the exit reason.
3580  * Note that we do not have to copy here all VMCS fields, just those that
3581  * could have been changed by the L2 guest or the exit - i.e., the guest-state and
3582  * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3583  * which already writes to vmcs12 directly.
3584  */
3585 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3586                            u32 exit_reason, u32 exit_intr_info,
3587                            unsigned long exit_qualification)
3588 {
3589         /* update exit information fields: */
3590         vmcs12->vm_exit_reason = exit_reason;
3591         vmcs12->exit_qualification = exit_qualification;
3592         vmcs12->vm_exit_intr_info = exit_intr_info;
3593
3594         vmcs12->idt_vectoring_info_field = 0;
3595         vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3596         vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3597
3598         if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3599                 vmcs12->launch_state = 1;
3600
3601                 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3602                  * instead of reading the real value. */
3603                 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
3604
3605                 /*
3606                  * Transfer the event that L0 or L1 may have wanted to inject into
3607                  * L2 to IDT_VECTORING_INFO_FIELD.
3608                  */
3609                 vmcs12_save_pending_event(vcpu, vmcs12);
3610
3611                 /*
3612                  * According to spec, there's no need to store the guest's
3613                  * MSRs if the exit is due to a VM-entry failure that occurs
3614                  * during or after loading the guest state. Since this exit
3615                  * does not fall in that category, we need to save the MSRs.
3616                  */
3617                 if (nested_vmx_store_msr(vcpu,
3618                                          vmcs12->vm_exit_msr_store_addr,
3619                                          vmcs12->vm_exit_msr_store_count))
3620                         nested_vmx_abort(vcpu,
3621                                          VMX_ABORT_SAVE_GUEST_MSR_FAIL);
3622         }
3623
3624         /*
3625          * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3626          * preserved above and would only end up incorrectly in L1.
3627          */
3628         vcpu->arch.nmi_injected = false;
3629         kvm_clear_exception_queue(vcpu);
3630         kvm_clear_interrupt_queue(vcpu);
3631 }
3632
3633 /*
3634  * A part of what we need to do when the nested L2 guest exits and we want to
3635  * run its L1 parent, is to reset L1's guest state to the host state specified
3636  * in vmcs12.
3637  * This function is to be called not only on normal nested exit, but also on
3638  * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3639  * Failures During or After Loading Guest State").
3640  * This function should be called when the active VMCS is L1's (vmcs01).
3641  */
3642 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3643                                    struct vmcs12 *vmcs12)
3644 {
3645         struct kvm_segment seg;
3646         u32 entry_failure_code;
3647
3648         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3649                 vcpu->arch.efer = vmcs12->host_ia32_efer;
3650         else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3651                 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
3652         else
3653                 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3654         vmx_set_efer(vcpu, vcpu->arch.efer);
3655
3656         kvm_rsp_write(vcpu, vmcs12->host_rsp);
3657         kvm_rip_write(vcpu, vmcs12->host_rip);
3658         vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3659         vmx_set_interrupt_shadow(vcpu, 0);
3660
3661         /*
3662          * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3663          * actually changed, because vmx_set_cr0 refers to efer set above.
3664          *
3665          * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3666          * (KVM doesn't change it).
3667          */
3668         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3669         vmx_set_cr0(vcpu, vmcs12->host_cr0);
3670
3671         /* Same as above - no reason to call set_cr4_guest_host_mask().  */
3672         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3673         vmx_set_cr4(vcpu, vmcs12->host_cr4);
3674
3675         nested_ept_uninit_mmu_context(vcpu);
3676
3677         /*
3678          * Only PDPTE load can fail as the value of cr3 was checked on entry and
3679          * couldn't have changed.
3680          */
3681         if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3682                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
3683
3684         if (!enable_ept)
3685                 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3686
3687         /*
3688          * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3689          * VMEntry/VMExit. Thus, no need to flush TLB.
3690          *
3691          * If vmcs12 doesn't use VPID, L1 expects TLB to be
3692          * flushed on every VMEntry/VMExit.
3693          *
3694          * Otherwise, we can preserve TLB entries as long as we are
3695          * able to tag L1 TLB entries differently than L2 TLB entries.
3696          *
3697          * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3698          * and therefore we request the TLB flush to happen only after VMCS EPTP
3699          * has been set by KVM_REQ_LOAD_CR3.
3700          */
3701         if (enable_vpid &&
3702             (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
3703                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3704         }
3705
3706         vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3707         vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3708         vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3709         vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3710         vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3711         vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3712         vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3713
3714         /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
3715         if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3716                 vmcs_write64(GUEST_BNDCFGS, 0);
3717
3718         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3719                 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3720                 vcpu->arch.pat = vmcs12->host_ia32_pat;
3721         }
3722         if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3723                 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3724                         vmcs12->host_ia32_perf_global_ctrl);
3725
3726         /* Set L1 segment info according to Intel SDM section 27.5.2,
3727          * "Loading Host Segment and Descriptor-Table Registers". */
3728         seg = (struct kvm_segment) {
3729                 .base = 0,
3730                 .limit = 0xFFFFFFFF,
3731                 .selector = vmcs12->host_cs_selector,
3732                 .type = 11,
3733                 .present = 1,
3734                 .s = 1,
3735                 .g = 1
3736         };
3737         if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3738                 seg.l = 1;
3739         else
3740                 seg.db = 1;
3741         vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3742         seg = (struct kvm_segment) {
3743                 .base = 0,
3744                 .limit = 0xFFFFFFFF,
3745                 .type = 3,
3746                 .present = 1,
3747                 .s = 1,
3748                 .db = 1,
3749                 .g = 1
3750         };
3751         seg.selector = vmcs12->host_ds_selector;
3752         vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3753         seg.selector = vmcs12->host_es_selector;
3754         vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3755         seg.selector = vmcs12->host_ss_selector;
3756         vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3757         seg.selector = vmcs12->host_fs_selector;
3758         seg.base = vmcs12->host_fs_base;
3759         vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3760         seg.selector = vmcs12->host_gs_selector;
3761         seg.base = vmcs12->host_gs_base;
3762         vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3763         seg = (struct kvm_segment) {
3764                 .base = vmcs12->host_tr_base,
3765                 .limit = 0x67,
3766                 .selector = vmcs12->host_tr_selector,
3767                 .type = 11,
3768                 .present = 1
3769         };
3770         vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3771
3772         kvm_set_dr(vcpu, 7, 0x400);
3773         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3774
3775         if (cpu_has_vmx_msr_bitmap())
3776                 vmx_update_msr_bitmap(vcpu);
3777
3778         if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3779                                 vmcs12->vm_exit_msr_load_count))
3780                 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3781 }
3782
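/*
 * Note: recover L1's EFER from vmcs01: use GUEST_IA32_EFER when the VM-entry
 * load-EFER control is active; on CPUs that have that control but are not
 * using it, assume L1's EFER matches host_efer; otherwise look for MSR_EFER
 * in the VM-entry autoload list or the shared MSR array, and fall back to
 * host_efer as a last resort.
 */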
3783 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3784 {
3785         struct shared_msr_entry *efer_msr;
3786         unsigned int i;
3787
3788         if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3789                 return vmcs_read64(GUEST_IA32_EFER);
3790
3791         if (cpu_has_load_ia32_efer())
3792                 return host_efer;
3793
3794         for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3795                 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3796                         return vmx->msr_autoload.guest.val[i].value;
3797         }
3798
3799         efer_msr = find_msr_entry(vmx, MSR_EFER);
3800         if (efer_msr)
3801                 return efer_msr->data;
3802
3803         return host_efer;
3804 }
3805
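/*
 * Restore L1's host state to KVM's software model after a hardware-
 * detected VM-entry failure.  vmcs01 is already loaded at this point;
 * EFER, CR0, CR4, CR3 and the PDPTRs are pulled back into the software
 * model, and any MSR that was prematurely loaded from the VM-entry MSR
 * load list is reloaded from the VM-exit MSR load list if the two
 * values differ.
 */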
3806 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3807 {
3808         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3809         struct vcpu_vmx *vmx = to_vmx(vcpu);
3810         struct vmx_msr_entry g, h;
3811         struct msr_data msr;
3812         gpa_t gpa;
3813         u32 i, j;
3814
3815         vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3816
3817         if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3818                 /*
3819                  * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set,
3820                  * as vmcs01.GUEST_DR7 contains a userspace-defined value
3821                  * and vcpu->arch.dr7 is not squirreled away before the
3822                  * nested VMENTER (not worth adding a variable in nested_vmx).
3823                  */
3824                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3825                         kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3826                 else
3827                         WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3828         }
3829
3830         /*
3831          * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3832          * handle a variety of side effects to KVM's software model.
3833          */
3834         vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3835
3836         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3837         vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3838
3839         vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3840         vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3841
3842         nested_ept_uninit_mmu_context(vcpu);
3843
3844         /*
3845          * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
3846          * points to shadow pages!  Fortunately we only get here after a WARN_ON
3847          * if EPT is disabled, so a VMabort is perfectly fine.
3848          */
3849         if (enable_ept) {
3850                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3851                 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3852         } else {
3853                 nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
3854         }
3855
3856         /*
3857          * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3858          * from vmcs01 (if necessary).  The PDPTRs are not loaded on
3859          * VMFail; like everything else, we just need to ensure our
3860          * software model is up-to-date.
3861          */
3862         ept_save_pdptrs(vcpu);
3863
3864         kvm_mmu_reset_context(vcpu);
3865
3866         if (cpu_has_vmx_msr_bitmap())
3867                 vmx_update_msr_bitmap(vcpu);
3868
3869         /*
3870          * This nasty bit of open coding is a compromise between blindly
3871          * loading L1's MSRs using the exit load lists (incorrect emulation
3872          * of VMFail), leaving the nested VM's MSRs in the software model
3873          * (incorrect behavior) and snapshotting the modified MSRs (too
3874          * expensive since the lists are unbounded by hardware).  For each
3875          * MSR that was (prematurely) loaded from the nested VMEntry load
3876          * list, reload it from the exit load list if it exists and differs
3877          * from the guest value.  The intent is to stuff host state as
3878          * silently as possible, not to fully process the exit load list.
3879          */
3880         msr.host_initiated = false;
3881         for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3882                 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3883                 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3884                         pr_debug_ratelimited(
3885                                 "%s read MSR index failed (%u, 0x%08llx)\n",
3886                                 __func__, i, gpa);
3887                         goto vmabort;
3888                 }
3889
3890                 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3891                         gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3892                         if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3893                                 pr_debug_ratelimited(
3894                                         "%s read MSR failed (%u, 0x%08llx)\n",
3895                                         __func__, j, gpa);
3896                                 goto vmabort;
3897                         }
3898                         if (h.index != g.index)
3899                                 continue;
3900                         if (h.value == g.value)
3901                                 break;
3902
3903                         if (nested_vmx_load_msr_check(vcpu, &h)) {
3904                                 pr_debug_ratelimited(
3905                                         "%s check failed (%u, 0x%x, 0x%x)\n",
3906                                         __func__, j, h.index, h.reserved);
3907                                 goto vmabort;
3908                         }
3909
3910                         msr.index = h.index;
3911                         msr.data = h.value;
3912                         if (kvm_set_msr(vcpu, &msr)) {
3913                                 pr_debug_ratelimited(
3914                                         "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3915                                         __func__, j, h.index, h.value);
3916                                 goto vmabort;
3917                         }
3918                 }
3919         }
3920
3921         return;
3922
3923 vmabort:
3924         nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3925 }
3926
3927 /*
3928  * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
3929  * and modify vmcs12 to make it see what it would expect to see there if
3930  * L2 was its real guest. Must only be called when in L2 (is_guest_mode()).
3931  */
3932 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3933                        u32 exit_intr_info, unsigned long exit_qualification)
3934 {
3935         struct vcpu_vmx *vmx = to_vmx(vcpu);
3936         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3937
3938         /* trying to cancel vmlaunch/vmresume is a bug */
3939         WARN_ON_ONCE(vmx->nested.nested_run_pending);
3940
3941         leave_guest_mode(vcpu);
3942
3943         if (nested_cpu_has_preemption_timer(vmcs12))
3944                 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
3945
3946         if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3947                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3948
3949         if (likely(!vmx->fail)) {
3950                 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
3951
3952                 if (exit_reason != -1)
3953                         prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
3954                                        exit_qualification);
3955
3956                 /*
3957                  * Must happen outside of sync_vmcs02_to_vmcs12() as it will
3958                  * also be used to capture vmcs12 cache as part of
3959                  * capturing nVMX state for snapshot (migration).
3960                  *
3961                  * Otherwise, this flush will dirty guest memory at a
3962                  * point it is already assumed by user-space to be
3963                  * immutable.
3964                  */
3965                 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
3966         } else {
3967                 /*
3968                  * The only expected VM-instruction error is "VM entry with
3969                  * invalid control field(s)." Anything else indicates a
3970                  * problem with L0.  And we should never get here with a
3971                  * VMFail of any type if early consistency checks are enabled.
3972                  */
3973                 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
3974                              VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3975                 WARN_ON_ONCE(nested_early_check);
3976         }
3977
3978         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3979
3980         /* Update any VMCS fields that might have changed while L2 ran */
3981         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3982         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3983         vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
3984
3985         if (kvm_has_tsc_control)
3986                 decache_tsc_multiplier(vmx);
3987
3988         if (vmx->nested.change_vmcs01_virtual_apic_mode) {
3989                 vmx->nested.change_vmcs01_virtual_apic_mode = false;
3990                 vmx_set_virtual_apic_mode(vcpu);
3991         } else if (!nested_cpu_has_ept(vmcs12) &&
3992                    nested_cpu_has2(vmcs12,
3993                                    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3994                 vmx_flush_tlb(vcpu, true);
3995         }
3996
3997         /* Unpin physical memory we referred to in vmcs02 */
3998         if (vmx->nested.apic_access_page) {
3999                 kvm_release_page_dirty(vmx->nested.apic_access_page);
4000                 vmx->nested.apic_access_page = NULL;
4001         }
4002         kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4003         kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4004         vmx->nested.pi_desc = NULL;
4005
4006         /*
4007          * While L2 was running, the mmu_notifier may have forced a reload of
4008          * the page's hpa for the L2 vmcs.  Reload it for L1 before entering L1.
4009          */
4010         kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4011
4012         if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
4013                 vmx->nested.need_vmcs12_to_shadow_sync = true;
4014
4015         /* in case we halted in L2 */
4016         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4017
4018         if (likely(!vmx->fail)) {
4019                 /*
4020                  * TODO: SDM says that with acknowledge interrupt on
4021                  * exit, bit 31 of the VM-exit interrupt information
4022                  * (valid interrupt) is always set to 1 on
4023                  * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
4024                  * need kvm_cpu_has_interrupt().  See the commit
4025                  * message for details.
4026                  */
4027                 if (nested_exit_intr_ack_set(vcpu) &&
4028                     exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4029                     kvm_cpu_has_interrupt(vcpu)) {
4030                         int irq = kvm_cpu_get_interrupt(vcpu);
4031                         WARN_ON(irq < 0);
4032                         vmcs12->vm_exit_intr_info = irq |
4033                                 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4034                 }
4035
4036                 if (exit_reason != -1)
4037                         trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4038                                                        vmcs12->exit_qualification,
4039                                                        vmcs12->idt_vectoring_info_field,
4040                                                        vmcs12->vm_exit_intr_info,
4041                                                        vmcs12->vm_exit_intr_error_code,
4042                                                        KVM_ISA_VMX);
4043
4044                 load_vmcs12_host_state(vcpu, vmcs12);
4045
4046                 return;
4047         }
4048
4049         /*
4050          * After an early L2 VM-entry failure, we're now back
4051          * in L1 which thinks it just finished a VMLAUNCH or
4052          * VMRESUME instruction, so we need to set the failure
4053          * flag and the VM-instruction error field of the VMCS
4054          * accordingly, and skip the emulated instruction.
4055          */
4056         (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4057
4058         /*
4059          * Restore L1's host state to KVM's software model.  We're here
4060          * because a consistency check was caught by hardware, which
4061          * means some amount of guest state has been propagated to KVM's
4062          * model and needs to be unwound to the host's state.
4063          */
4064         nested_vmx_restore_host_state(vcpu);
4065
4066         vmx->fail = 0;
4067 }
4068
4069 /*
4070  * Decode the memory-address operand of a vmx instruction, as recorded on an
4071  * exit caused by such an instruction (run by a guest hypervisor).
4072  * On success, returns 0. When the operand is invalid, returns 1 and throws
4073  * #UD or #GP.
4074  */
4075 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4076                         u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4077 {
4078         gva_t off;
4079         bool exn;
4080         struct kvm_segment s;
4081
4082         /*
4083          * According to Vol. 3B, "Information for VM Exits Due to Instruction
4084          * Execution", on an exit, vmx_instruction_info holds most of the
4085          * addressing components of the operand. Only the displacement part
4086          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4087          * For how an actual address is calculated from all these components,
4088          * refer to Vol. 1, "Operand Addressing".
4089          */
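        /*
         * Layout of vmx_instruction_info as decoded below:
         *   bits  1:0   scaling (the index register is shifted left by this)
         *   bits  9:7   address size (0 = 16-bit, 1 = 32-bit)
         *   bit   10    operand is a register rather than memory
         *   bits 17:15  segment register
         *   bits 21:18  index register, valid iff bit 22 is clear
         *   bits 26:23  base register, valid iff bit 27 is clear
         */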
4090         int  scaling = vmx_instruction_info & 3;
4091         int  addr_size = (vmx_instruction_info >> 7) & 7;
4092         bool is_reg = vmx_instruction_info & (1u << 10);
4093         int  seg_reg = (vmx_instruction_info >> 15) & 7;
4094         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
4095         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4096         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
4097         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
4098
4099         if (is_reg) {
4100                 kvm_queue_exception(vcpu, UD_VECTOR);
4101                 return 1;
4102         }
4103
4104         /* Addr = segment_base + offset */
4105         /* offset = base + [index * scale] + displacement */
4106         off = exit_qualification; /* holds the displacement */
4107         if (addr_size == 1)
4108                 off = (gva_t)sign_extend64(off, 31);
4109         else if (addr_size == 0)
4110                 off = (gva_t)sign_extend64(off, 15);
4111         if (base_is_valid)
4112                 off += kvm_register_read(vcpu, base_reg);
4113         if (index_is_valid)
4114                 off += kvm_register_read(vcpu, index_reg) << scaling;
4115         vmx_get_segment(vcpu, &s, seg_reg);
4116
4117         /*
4118          * The effective address, i.e. @off, of a memory operand is truncated
4119          * based on the address size of the instruction.  Note that this is
4120          * the *effective address*, i.e. the address prior to accounting for
4121          * the segment's base.
4122          */
4123         if (addr_size == 1) /* 32 bit */
4124                 off &= 0xffffffff;
4125         else if (addr_size == 0) /* 16 bit */
4126                 off &= 0xffff;
4127
4128         /* Checks for #GP/#SS exceptions. */
4129         exn = false;
4130         if (is_long_mode(vcpu)) {
4131                 /*
4132                  * The virtual/linear address is never truncated in 64-bit
4133                  * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4134                  * address when using FS/GS with a non-zero base.
4135                  */
4136                 *ret = s.base + off;
4137
4138                 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4139                  * non-canonical form. This is the only check on the memory
4140                  * destination for long mode!
4141                  */
4142                 exn = is_noncanonical_address(*ret, vcpu);
4143         } else {
4144                 /*
4145                  * When not in long mode, the virtual/linear address is
4146                  * unconditionally truncated to 32 bits regardless of the
4147                  * address size.
4148                  */
4149                 *ret = (s.base + off) & 0xffffffff;
4150
4151                 /* Protected mode: apply checks for segment validity in the
4152                  * following order:
4153                  * - segment type check (#GP(0) may be thrown)
4154                  * - usability check (#GP(0)/#SS(0))
4155                  * - limit check (#GP(0)/#SS(0))
4156                  */
4157                 if (wr)
4158                         /* #GP(0) if the destination operand is located in a
4159                          * read-only data segment or any code segment.
4160                          */
4161                         exn = ((s.type & 0xa) == 0 || (s.type & 8));
4162                 else
4163                         /* #GP(0) if the source operand is located in an
4164                          * execute-only code segment
4165                          */
4166                         exn = ((s.type & 0xa) == 8);
4167                 if (exn) {
4168                         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4169                         return 1;
4170                 }
4171                 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4172                  */
4173                 exn = (s.unusable != 0);
4174
4175                 /*
4176                  * Protected mode: #GP(0)/#SS(0) if the memory operand is
4177                  * outside the segment limit.  All CPUs that support VMX ignore
4178                  * limit checks for flat segments, i.e. segments with base==0,
4179                  * limit==0xffffffff and of type expand-up data or code.
4180                  */
4181                 if (!(s.base == 0 && s.limit == 0xffffffff &&
4182                      ((s.type & 8) || !(s.type & 4))))
4183                         exn = exn || ((u64)off + len - 1 > s.limit);
4184         }
4185         if (exn) {
4186                 kvm_queue_exception_e(vcpu,
4187                                       seg_reg == VCPU_SREG_SS ?
4188                                                 SS_VECTOR : GP_VECTOR,
4189                                       0);
4190                 return 1;
4191         }
4192
4193         return 0;
4194 }
4195
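/*
 * Read the 64-bit VMCS pointer operand of a VMXON/VMPTRLD/VMCLEAR
 * instruction from the guest memory address encoded in the exiting
 * instruction.
 */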
4196 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4197 {
4198         gva_t gva;
4199         struct x86_exception e;
4200
4201         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4202                                 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4203                                 sizeof(*vmpointer), &gva))
4204                 return 1;
4205
4206         if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4207                 kvm_inject_page_fault(vcpu, &e);
4208                 return 1;
4209         }
4210
4211         return 0;
4212 }
4213
4214 /*
4215  * Allocate a shadow VMCS and associate it with the currently loaded
4216  * VMCS, unless such a shadow VMCS already exists. The newly allocated
4217  * VMCS is also VMCLEARed, so that it is ready for use.
4218  */
4219 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4220 {
4221         struct vcpu_vmx *vmx = to_vmx(vcpu);
4222         struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4223
4224         /*
4225          * We should allocate a shadow vmcs for vmcs01 only when L1
4226          * executes VMXON and free it when L1 executes VMXOFF.
4227          * As it is invalid to execute VMXON twice, we shouldn't reach
4228          * here when vmcs01 already has an allocated shadow vmcs.
4229          */
4230         WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4231
4232         if (!loaded_vmcs->shadow_vmcs) {
4233                 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4234                 if (loaded_vmcs->shadow_vmcs)
4235                         vmcs_clear(loaded_vmcs->shadow_vmcs);
4236         }
4237         return loaded_vmcs->shadow_vmcs;
4238 }
4239
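/*
 * Allocate the state needed to emulate VMX operation for L1: vmcs02,
 * the cached vmcs12 and shadow vmcs12 buffers, an optional shadow VMCS,
 * the emulated preemption timer and vpid02.
 */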
4240 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4241 {
4242         struct vcpu_vmx *vmx = to_vmx(vcpu);
4243         int r;
4244
4245         r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4246         if (r < 0)
4247                 goto out_vmcs02;
4248
4249         vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4250         if (!vmx->nested.cached_vmcs12)
4251                 goto out_cached_vmcs12;
4252
4253         vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4254         if (!vmx->nested.cached_shadow_vmcs12)
4255                 goto out_cached_shadow_vmcs12;
4256
4257         if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4258                 goto out_shadow_vmcs;
4259
4260         hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4261                      HRTIMER_MODE_REL_PINNED);
4262         vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4263
4264         vmx->nested.vpid02 = allocate_vpid();
4265
4266         vmx->nested.vmcs02_initialized = false;
4267         vmx->nested.vmxon = true;
4268
4269         if (pt_mode == PT_MODE_HOST_GUEST) {
4270                 vmx->pt_desc.guest.ctl = 0;
4271                 pt_update_intercept_for_msr(vmx);
4272         }
4273
4274         return 0;
4275
4276 out_shadow_vmcs:
4277         kfree(vmx->nested.cached_shadow_vmcs12);
4278
4279 out_cached_shadow_vmcs12:
4280         kfree(vmx->nested.cached_vmcs12);
4281
4282 out_cached_vmcs12:
4283         free_loaded_vmcs(&vmx->nested.vmcs02);
4284
4285 out_vmcs02:
4286         return -ENOMEM;
4287 }
4288
4289 /*
4290  * Emulate the VMXON instruction.
4291  * Currently, we just remember that VMX is active, and do not save or even
4292  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4293  * do not currently need to store anything in that guest-allocated memory
4294  * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4295  * argument is different from the VMXON pointer (which the spec says they do).
4296  */
4297 static int handle_vmon(struct kvm_vcpu *vcpu)
4298 {
4299         int ret;
4300         gpa_t vmptr;
4301         uint32_t revision;
4302         struct vcpu_vmx *vmx = to_vmx(vcpu);
4303         const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4304                 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4305
4306         /*
4307          * The Intel VMX Instruction Reference lists a bunch of bits that are
4308          * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4309          * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4310          * Otherwise, we should fail with #UD.  But most faulting conditions
4311          * have already been checked by hardware, prior to the VM-exit for
4312          * VMXON.  We do test guest cr4.VMXE because processor CR4 always has
4313          * that bit set to 1 in non-root mode.
4314          */
4315         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4316                 kvm_queue_exception(vcpu, UD_VECTOR);
4317                 return 1;
4318         }
4319
4320         /* CPL=0 must be checked manually. */
4321         if (vmx_get_cpl(vcpu)) {
4322                 kvm_inject_gp(vcpu, 0);
4323                 return 1;
4324         }
4325
4326         if (vmx->nested.vmxon)
4327                 return nested_vmx_failValid(vcpu,
4328                         VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4329
4330         if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4331                         != VMXON_NEEDED_FEATURES) {
4332                 kvm_inject_gp(vcpu, 0);
4333                 return 1;
4334         }
4335
4336         if (nested_vmx_get_vmptr(vcpu, &vmptr))
4337                 return 1;
4338
4339         /*
4340          * SDM 3: 24.11.5
4341          * The first 4 bytes of the VMXON region contain the supported
4342          * VMCS revision identifier.
4343          *
4344          * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
4345          * which would replace the physical address width with 32.
4346          */
4347         if (!page_address_valid(vcpu, vmptr))
4348                 return nested_vmx_failInvalid(vcpu);
4349
4350         if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4351             revision != VMCS12_REVISION)
4352                 return nested_vmx_failInvalid(vcpu);
4353
4354         vmx->nested.vmxon_ptr = vmptr;
4355         ret = enter_vmx_operation(vcpu);
4356         if (ret)
4357                 return ret;
4358
4359         return nested_vmx_succeed(vcpu);
4360 }
4361
4362 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4363 {
4364         struct vcpu_vmx *vmx = to_vmx(vcpu);
4365
4366         if (vmx->nested.current_vmptr == -1ull)
4367                 return;
4368
4369         copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4370
4371         if (enable_shadow_vmcs) {
4372                 /* Copy to memory all shadowed fields in case
4373                  * they were modified. */
4374                 copy_shadow_to_vmcs12(vmx);
4375                 vmx->nested.need_vmcs12_to_shadow_sync = false;
4376                 vmx_disable_shadow_vmcs(vmx);
4377         }
4378         vmx->nested.posted_intr_nv = -1;
4379
4380         /* Flush VMCS12 to guest memory */
4381         kvm_vcpu_write_guest_page(vcpu,
4382                                   vmx->nested.current_vmptr >> PAGE_SHIFT,
4383                                   vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4384
4385         kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4386
4387         vmx->nested.current_vmptr = -1ull;
4388 }
4389
4390 /* Emulate the VMXOFF instruction */
4391 static int handle_vmoff(struct kvm_vcpu *vcpu)
4392 {
4393         if (!nested_vmx_check_permission(vcpu))
4394                 return 1;
4395         free_nested(vcpu);
4396         return nested_vmx_succeed(vcpu);
4397 }
4398
4399 /* Emulate the VMCLEAR instruction */
4400 static int handle_vmclear(struct kvm_vcpu *vcpu)
4401 {
4402         struct vcpu_vmx *vmx = to_vmx(vcpu);
4403         u32 zero = 0;
4404         gpa_t vmptr;
4405
4406         if (!nested_vmx_check_permission(vcpu))
4407                 return 1;
4408
4409         if (nested_vmx_get_vmptr(vcpu, &vmptr))
4410                 return 1;
4411
4412         if (!page_address_valid(vcpu, vmptr))
4413                 return nested_vmx_failValid(vcpu,
4414                         VMXERR_VMCLEAR_INVALID_ADDRESS);
4415
4416         if (vmptr == vmx->nested.vmxon_ptr)
4417                 return nested_vmx_failValid(vcpu,
4418                         VMXERR_VMCLEAR_VMXON_POINTER);
4419
4420         if (vmx->nested.hv_evmcs_map.hva) {
4421                 if (vmptr == vmx->nested.hv_evmcs_vmptr)
4422                         nested_release_evmcs(vcpu);
4423         } else {
4424                 if (vmptr == vmx->nested.current_vmptr)
4425                         nested_release_vmcs12(vcpu);
4426
4427                 kvm_vcpu_write_guest(vcpu,
4428                                      vmptr + offsetof(struct vmcs12,
4429                                                       launch_state),
4430                                      &zero, sizeof(zero));
4431         }
4432
4433         return nested_vmx_succeed(vcpu);
4434 }
4435
4436 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4437
4438 /* Emulate the VMLAUNCH instruction */
4439 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4440 {
4441         return nested_vmx_run(vcpu, true);
4442 }
4443
4444 /* Emulate the VMRESUME instruction */
4445 static int handle_vmresume(struct kvm_vcpu *vcpu)
4446 {
4447
4448         return nested_vmx_run(vcpu, false);
4449 }
4450
4451 static int handle_vmread(struct kvm_vcpu *vcpu)
4452 {
4453         unsigned long field;
4454         u64 field_value;
4455         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4456         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4457         int len;
4458         gva_t gva = 0;
4459         struct vmcs12 *vmcs12;
4460         short offset;
4461
4462         if (!nested_vmx_check_permission(vcpu))
4463                 return 1;
4464
4465         if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4466                 return nested_vmx_failInvalid(vcpu);
4467
4468         if (!is_guest_mode(vcpu))
4469                 vmcs12 = get_vmcs12(vcpu);
4470         else {
4471                 /*
4472                  * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4473                  * to a shadowed field sets the ALU flags for VMfailInvalid.
4474                  */
4475                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4476                         return nested_vmx_failInvalid(vcpu);
4477                 vmcs12 = get_shadow_vmcs12(vcpu);
4478         }
4479
4480         /* Decode instruction info and find the field to read */
4481         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4482
4483         offset = vmcs_field_to_offset(field);
4484         if (offset < 0)
4485                 return nested_vmx_failValid(vcpu,
4486                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4487
4488         if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
4489                 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4490
4491         /* Read the field, zero-extended to a u64 field_value */
4492         field_value = vmcs12_read_any(vmcs12, field, offset);
4493
4494         /*
4495          * Now copy part of this value to register or memory, as requested.
4496          * Note that the number of bits actually copied is 32 or 64 depending
4497          * on the guest's mode (32 or 64 bit), not on the given field's length.
4498          */
4499         if (vmx_instruction_info & (1u << 10)) {
4500                 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4501                         field_value);
4502         } else {
4503                 len = is_64_bit_mode(vcpu) ? 8 : 4;
4504                 if (get_vmx_mem_address(vcpu, exit_qualification,
4505                                 vmx_instruction_info, true, len, &gva))
4506                         return 1;
4507                 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4508                 kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
4509         }
4510
4511         return nested_vmx_succeed(vcpu);
4512 }
4513
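/*
 * is_shadow_field_rw()/is_shadow_field_ro() report whether a VMCS field
 * is in the shadowed read/write or read-only field lists generated from
 * vmcs_shadow_fields.h.
 */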
4514 static bool is_shadow_field_rw(unsigned long field)
4515 {
4516         switch (field) {
4517 #define SHADOW_FIELD_RW(x, y) case x:
4518 #include "vmcs_shadow_fields.h"
4519                 return true;
4520         default:
4521                 break;
4522         }
4523         return false;
4524 }
4525
4526 static bool is_shadow_field_ro(unsigned long field)
4527 {
4528         switch (field) {
4529 #define SHADOW_FIELD_RO(x, y) case x:
4530 #include "vmcs_shadow_fields.h"
4531                 return true;
4532         default:
4533                 break;
4534         }
4535         return false;
4536 }
4537
4538 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4539 {
4540         unsigned long field;
4541         int len;
4542         gva_t gva;
4543         struct vcpu_vmx *vmx = to_vmx(vcpu);
4544         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4545         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4546
4547         /* The value to write might be 32 or 64 bits, depending on L1's long
4548          * mode, and eventually we need to write that into a field of several
4549          * possible lengths. The code below first zero-extends the value to 64
4550          * bit (field_value), and then copies only the appropriate number of
4551          * bits into the vmcs12 field.
4552          */
4553         u64 field_value = 0;
4554         struct x86_exception e;
4555         struct vmcs12 *vmcs12;
4556         short offset;
4557
4558         if (!nested_vmx_check_permission(vcpu))
4559                 return 1;
4560
4561         if (vmx->nested.current_vmptr == -1ull)
4562                 return nested_vmx_failInvalid(vcpu);
4563
4564         if (vmx_instruction_info & (1u << 10))
4565                 field_value = kvm_register_readl(vcpu,
4566                         (((vmx_instruction_info) >> 3) & 0xf));
4567         else {
4568                 len = is_64_bit_mode(vcpu) ? 8 : 4;
4569                 if (get_vmx_mem_address(vcpu, exit_qualification,
4570                                 vmx_instruction_info, false, len, &gva))
4571                         return 1;
4572                 if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
4573                         kvm_inject_page_fault(vcpu, &e);
4574                         return 1;
4575                 }
4576         }
4577
4578
4579         field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4580         /*
4581          * If the vCPU supports "VMWRITE to any supported field in the
4582          * VMCS," then the "read-only" fields are actually read/write.
4583          */
4584         if (vmcs_field_readonly(field) &&
4585             !nested_cpu_has_vmwrite_any_field(vcpu))
4586                 return nested_vmx_failValid(vcpu,
4587                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4588
4589         if (!is_guest_mode(vcpu)) {
4590                 vmcs12 = get_vmcs12(vcpu);
4591
4592                 /*
4593                  * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
4594                  * vmcs12, else we may clobber a field or consume a stale value.
4595                  */
4596                 if (!is_shadow_field_rw(field))
4597                         copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4598         } else {
4599                 /*
4600                  * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
4601                  * to a shadowed field sets the ALU flags for VMfailInvalid.
4602                  */
4603                 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4604                         return nested_vmx_failInvalid(vcpu);
4605                 vmcs12 = get_shadow_vmcs12(vcpu);
4606         }
4607
4608         offset = vmcs_field_to_offset(field);
4609         if (offset < 0)
4610                 return nested_vmx_failValid(vcpu,
4611                         VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4612
4613         /*
4614          * Some Intel CPUs intentionally drop the reserved bits of the AR byte
4615          * fields on VMWRITE.  Emulate this behavior to ensure consistent KVM
4616          * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
4617          * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
4618          * from L1 will return a different value than VMREAD from L2 (L1 sees
4619          * the stripped down value, L2 sees the full value as stored by KVM).
4620          */
4621         if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
4622                 field_value &= 0x1f0ff;
4623
4624         vmcs12_write_any(vmcs12, field, offset, field_value);
4625
4626         /*
4627          * Do not track vmcs12 dirty-state if in guest-mode as we actually
4628          * dirty shadow vmcs12 instead of vmcs12.  Fields that can be updated
4629          * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
4630          * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
4631          */
4632         if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
4633                 /*
4634                  * L1 can read these fields without exiting, ensure the
4635                  * shadow VMCS is up-to-date.
4636                  */
4637                 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
4638                         preempt_disable();
4639                         vmcs_load(vmx->vmcs01.shadow_vmcs);
4640
4641                         __vmcs_writel(field, field_value);
4642
4643                         vmcs_clear(vmx->vmcs01.shadow_vmcs);
4644                         vmcs_load(vmx->loaded_vmcs->vmcs);
4645                         preempt_enable();
4646                 }
4647                 vmx->nested.dirty_vmcs12 = true;
4648         }
4649
4650         return nested_vmx_succeed(vcpu);
4651 }
4652
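/*
 * Make @vmptr the current vmcs12 pointer.  With shadow VMCS enabled,
 * also point VMCS_LINK_POINTER at vmcs01's shadow VMCS and flag it for
 * a resync from the newly loaded vmcs12.
 */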
4653 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4654 {
4655         vmx->nested.current_vmptr = vmptr;
4656         if (enable_shadow_vmcs) {
4657                 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4658                               SECONDARY_EXEC_SHADOW_VMCS);
4659                 vmcs_write64(VMCS_LINK_POINTER,
4660                              __pa(vmx->vmcs01.shadow_vmcs));
4661                 vmx->nested.need_vmcs12_to_shadow_sync = true;
4662         }
4663         vmx->nested.dirty_vmcs12 = true;
4664 }
4665
4666 /* Emulate the VMPTRLD instruction */
4667 static int handle_vmptrld(struct kvm_vcpu *vcpu)
4668 {
4669         struct vcpu_vmx *vmx = to_vmx(vcpu);
4670         gpa_t vmptr;
4671
4672         if (!nested_vmx_check_permission(vcpu))
4673                 return 1;
4674
4675         if (nested_vmx_get_vmptr(vcpu, &vmptr))
4676                 return 1;
4677
4678         if (!page_address_valid(vcpu, vmptr))
4679                 return nested_vmx_failValid(vcpu,
4680                         VMXERR_VMPTRLD_INVALID_ADDRESS);
4681
4682         if (vmptr == vmx->nested.vmxon_ptr)
4683                 return nested_vmx_failValid(vcpu,
4684                         VMXERR_VMPTRLD_VMXON_POINTER);
4685
4686         /* Forbid normal VMPTRLD if Enlightened version was used */
4687         if (vmx->nested.hv_evmcs)
4688                 return 1;
4689
4690         if (vmx->nested.current_vmptr != vmptr) {
4691                 struct kvm_host_map map;
4692                 struct vmcs12 *new_vmcs12;
4693
4694                 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
4695                         /*
4696                          * Reads from an unbacked page return all 1s,
4697                          * which means that the 32 bits located at the
4698                          * given physical address won't match the required
4699                          * VMCS12_REVISION identifier.
4700                          */
4701                         return nested_vmx_failValid(vcpu,
4702                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4703                 }
4704
4705                 new_vmcs12 = map.hva;
4706
4707                 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4708                     (new_vmcs12->hdr.shadow_vmcs &&
4709                      !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4710                         kvm_vcpu_unmap(vcpu, &map, false);
4711                         return nested_vmx_failValid(vcpu,
4712                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4713                 }
4714
4715                 nested_release_vmcs12(vcpu);
4716
4717                 /*
4718                  * Load VMCS12 from guest memory since it is not already
4719                  * cached.
4720                  */
4721                 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4722                 kvm_vcpu_unmap(vcpu, &map, false);
4723
4724                 set_current_vmptr(vmx, vmptr);
4725         }
4726
4727         return nested_vmx_succeed(vcpu);
4728 }
4729
4730 /* Emulate the VMPTRST instruction */
4731 static int handle_vmptrst(struct kvm_vcpu *vcpu)
4732 {
4733         unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4734         u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4735         gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4736         struct x86_exception e;
4737         gva_t gva;
4738
4739         if (!nested_vmx_check_permission(vcpu))
4740                 return 1;
4741
4742         if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4743                 return 1;
4744
4745         if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
4746                                 true, sizeof(gpa_t), &gva))
4747                 return 1;
4748         /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4749         if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
4750                                         sizeof(gpa_t), &e)) {
4751                 kvm_inject_page_fault(vcpu, &e);
4752                 return 1;
4753         }
4754         return nested_vmx_succeed(vcpu);
4755 }
4756
4757 /* Emulate the INVEPT instruction */
4758 static int handle_invept(struct kvm_vcpu *vcpu)
4759 {
4760         struct vcpu_vmx *vmx = to_vmx(vcpu);
4761         u32 vmx_instruction_info, types;
4762         unsigned long type;
4763         gva_t gva;
4764         struct x86_exception e;
4765         struct {
4766                 u64 eptp, gpa;
4767         } operand;
4768
4769         if (!(vmx->nested.msrs.secondary_ctls_high &
4770               SECONDARY_EXEC_ENABLE_EPT) ||
4771             !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4772                 kvm_queue_exception(vcpu, UD_VECTOR);
4773                 return 1;
4774         }
4775
4776         if (!nested_vmx_check_permission(vcpu))
4777                 return 1;
4778
4779         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4780         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4781
4782         types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4783
4784         if (type >= 32 || !(types & (1 << type)))
4785                 return nested_vmx_failValid(vcpu,
4786                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4787
4788         /* According to the Intel VMX instruction reference, the memory
4789          * operand is read even if it isn't needed (e.g., for type==global)
4790          */
4791         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4792                         vmx_instruction_info, false, sizeof(operand), &gva))
4793                 return 1;
4794         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4795                 kvm_inject_page_fault(vcpu, &e);
4796                 return 1;
4797         }
4798
4799         switch (type) {
4800         case VMX_EPT_EXTENT_GLOBAL:
4801         /*
4802          * TODO: track mappings and invalidate
4803          * single context requests appropriately
4804          */
4805         case VMX_EPT_EXTENT_CONTEXT:
4806                 kvm_mmu_sync_roots(vcpu);
4807                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4808                 break;
4809         default:
4810                 BUG_ON(1);
4811                 break;
4812         }
4813
4814         return nested_vmx_succeed(vcpu);
4815 }
4816
4817 static int handle_invvpid(struct kvm_vcpu *vcpu)
4818 {
4819         struct vcpu_vmx *vmx = to_vmx(vcpu);
4820         u32 vmx_instruction_info;
4821         unsigned long type, types;
4822         gva_t gva;
4823         struct x86_exception e;
4824         struct {
4825                 u64 vpid;
4826                 u64 gla;
4827         } operand;
4828         u16 vpid02;
4829
4830         if (!(vmx->nested.msrs.secondary_ctls_high &
4831               SECONDARY_EXEC_ENABLE_VPID) ||
4832                         !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4833                 kvm_queue_exception(vcpu, UD_VECTOR);
4834                 return 1;
4835         }
4836
4837         if (!nested_vmx_check_permission(vcpu))
4838                 return 1;
4839
4840         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4841         type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4842
4843         types = (vmx->nested.msrs.vpid_caps &
4844                         VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4845
4846         if (type >= 32 || !(types & (1 << type)))
4847                 return nested_vmx_failValid(vcpu,
4848                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4849
4850         /* According to the Intel VMX instruction reference, the memory
4851          * operand is read even if it isn't needed (e.g., for type==global)
4852          */
4853         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4854                         vmx_instruction_info, false, sizeof(operand), &gva))
4855                 return 1;
4856         if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4857                 kvm_inject_page_fault(vcpu, &e);
4858                 return 1;
4859         }
4860         if (operand.vpid >> 16)
4861                 return nested_vmx_failValid(vcpu,
4862                         VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4863
4864         vpid02 = nested_get_vpid02(vcpu);
4865         switch (type) {
4866         case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4867                 if (!operand.vpid ||
4868                     is_noncanonical_address(operand.gla, vcpu))
4869                         return nested_vmx_failValid(vcpu,
4870                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4871                 if (cpu_has_vmx_invvpid_individual_addr()) {
4872                         __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4873                                 vpid02, operand.gla);
4874                 } else
4875                         __vmx_flush_tlb(vcpu, vpid02, false);
4876                 break;
4877         case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4878         case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4879                 if (!operand.vpid)
4880                         return nested_vmx_failValid(vcpu,
4881                                 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4882                 __vmx_flush_tlb(vcpu, vpid02, false);
4883                 break;
4884         case VMX_VPID_EXTENT_ALL_CONTEXT:
4885                 __vmx_flush_tlb(vcpu, vpid02, false);
4886                 break;
4887         default:
4888                 WARN_ON_ONCE(1);
4889                 return kvm_skip_emulated_instruction(vcpu);
4890         }
4891
4892         return nested_vmx_succeed(vcpu);
4893 }
4894
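/*
 * Handle VMFUNC leaf 0 (EPTP switching): read the requested EPTP from
 * L1's EPTP list and, if it differs from the current EPT pointer,
 * validate it and reload the MMU.  Returns 0 on success and 1 to
 * reflect a VM exit to L1.
 */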
4895 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4896                                      struct vmcs12 *vmcs12)
4897 {
4898         u32 index = kvm_rcx_read(vcpu);
4899         u64 address;
4900         bool accessed_dirty;
4901         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4902
4903         if (!nested_cpu_has_eptp_switching(vmcs12) ||
4904             !nested_cpu_has_ept(vmcs12))
4905                 return 1;
4906
4907         if (index >= VMFUNC_EPTP_ENTRIES)
4908                 return 1;
4909
4910
4911         if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4912                                      &address, index * 8, 8))
4913                 return 1;
4914
4915         accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
4916
4917         /*
4918          * If the (L2) guest does a vmfunc to the currently
4919          * active ept pointer, we don't have to do anything else
4920          */
4921         if (vmcs12->ept_pointer != address) {
4922                 if (!valid_ept_address(vcpu, address))
4923                         return 1;
4924
4925                 kvm_mmu_unload(vcpu);
4926                 mmu->ept_ad = accessed_dirty;
4927                 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
4928                 vmcs12->ept_pointer = address;
4929                 /*
4930                  * TODO: Check what the correct approach is in case the
4931                  * mmu reload fails. Currently, we just let the next
4932                  * reload potentially fail
4933                  */
4934                 kvm_mmu_reload(vcpu);
4935         }
4936
4937         return 0;
4938 }
4939
4940 static int handle_vmfunc(struct kvm_vcpu *vcpu)
4941 {
4942         struct vcpu_vmx *vmx = to_vmx(vcpu);
4943         struct vmcs12 *vmcs12;
4944         u32 function = kvm_rax_read(vcpu);
4945
4946         /*
4947          * VMFUNC is only supported for nested guests, but we always enable the
4948          * secondary control for simplicity; for non-nested mode, fake that we
4949          * didn't enable it by injecting #UD.
4950          */
4951         if (!is_guest_mode(vcpu)) {
4952                 kvm_queue_exception(vcpu, UD_VECTOR);
4953                 return 1;
4954         }
4955
4956         vmcs12 = get_vmcs12(vcpu);
4957         if ((vmcs12->vm_function_control & (1 << function)) == 0)
4958                 goto fail;
4959
4960         switch (function) {
4961         case 0:
4962                 if (nested_vmx_eptp_switching(vcpu, vmcs12))
4963                         goto fail;
4964                 break;
4965         default:
4966                 goto fail;
4967         }
4968         return kvm_skip_emulated_instruction(vcpu);
4969
4970 fail:
4971         nested_vmx_vmexit(vcpu, vmx->exit_reason,
4972                           vmcs_read32(VM_EXIT_INTR_INFO),
4973                           vmcs_readl(EXIT_QUALIFICATION));
4974         return 1;
4975 }
4976
4977
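/*
 * Returns true if an I/O instruction executed in L2 should cause a VM
 * exit to L1, either unconditionally or because a bit is set in
 * vmcs12's I/O bitmaps for one of the bytes of the accessed port range.
 */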
4978 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
4979                                        struct vmcs12 *vmcs12)
4980 {
4981         unsigned long exit_qualification;
4982         gpa_t bitmap, last_bitmap;
4983         unsigned int port;
4984         int size;
4985         u8 b;
4986
4987         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
4988                 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
4989
4990         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4991
4992         port = exit_qualification >> 16;
4993         size = (exit_qualification & 7) + 1;
4994
4995         last_bitmap = (gpa_t)-1;
4996         b = -1;
4997
4998         while (size > 0) {
4999                 if (port < 0x8000)
5000                         bitmap = vmcs12->io_bitmap_a;
5001                 else if (port < 0x10000)
5002                         bitmap = vmcs12->io_bitmap_b;
5003                 else
5004                         return true;
5005                 bitmap += (port & 0x7fff) / 8;
5006
5007                 if (last_bitmap != bitmap)
5008                         if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5009                                 return true;
5010                 if (b & (1 << (port & 7)))
5011                         return true;
5012
5013                 port++;
5014                 size--;
5015                 last_bitmap = bitmap;
5016         }
5017
5018         return false;
5019 }
5020
5021 /*
5022  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5023  * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5024  * disinterest in the current event (read or write a specific MSR) by using an
5025  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5026  */
5027 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5028         struct vmcs12 *vmcs12, u32 exit_reason)
5029 {
5030         u32 msr_index = kvm_rcx_read(vcpu);
5031         gpa_t bitmap;
5032
5033         if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5034                 return true;
5035
5036         /*
5037          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5038          * for the four combinations of read/write and low/high MSR numbers.
5039          * First we need to figure out which of the four to use:
5040          */
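        /*
         * Concretely: reads of MSRs 0x00000000-0x00001fff use bytes 0-1023,
         * reads of MSRs 0xc0000000-0xc0001fff use bytes 1024-2047, and the
         * corresponding write bitmaps follow at offset 2048.
         */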
5041         bitmap = vmcs12->msr_bitmap;
5042         if (exit_reason == EXIT_REASON_MSR_WRITE)
5043                 bitmap += 2048;
5044         if (msr_index >= 0xc0000000) {
5045                 msr_index -= 0xc0000000;
5046                 bitmap += 1024;
5047         }
5048
5049         /* Then read the msr_index'th bit from this bitmap: */
5050         if (msr_index < 1024*8) {
5051                 unsigned char b;
5052                 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5053                         return true;
5054                 return 1 & (b >> (msr_index & 7));
5055         } else
5056                 return true; /* let L1 handle the wrong parameter */
5057 }
5058
5059 /*
5060  * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5061  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5062  * intercept (via guest_host_mask etc.) the current event.
5063  */
5064 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5065         struct vmcs12 *vmcs12)
5066 {
5067         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5068         int cr = exit_qualification & 15;
5069         int reg;
5070         unsigned long val;
5071
5072         switch ((exit_qualification >> 4) & 3) {
5073         case 0: /* mov to cr */
5074                 reg = (exit_qualification >> 8) & 15;
5075                 val = kvm_register_readl(vcpu, reg);
5076                 switch (cr) {
5077                 case 0:
5078                         if (vmcs12->cr0_guest_host_mask &
5079                             (val ^ vmcs12->cr0_read_shadow))
5080                                 return true;
5081                         break;
5082                 case 3:
5083                         if ((vmcs12->cr3_target_count >= 1 &&
5084                                         vmcs12->cr3_target_value0 == val) ||
5085                                 (vmcs12->cr3_target_count >= 2 &&
5086                                         vmcs12->cr3_target_value1 == val) ||
5087                                 (vmcs12->cr3_target_count >= 3 &&
5088                                         vmcs12->cr3_target_value2 == val) ||
5089                                 (vmcs12->cr3_target_count >= 4 &&
5090                                         vmcs12->cr3_target_value3 == val))
5091                                 return false;
5092                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5093                                 return true;
5094                         break;
5095                 case 4:
5096                         if (vmcs12->cr4_guest_host_mask &
5097                             (vmcs12->cr4_read_shadow ^ val))
5098                                 return true;
5099                         break;
5100                 case 8:
5101                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5102                                 return true;
5103                         break;
5104                 }
5105                 break;
5106         case 2: /* clts */
5107                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5108                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
5109                         return true;
5110                 break;
5111         case 1: /* mov from cr */
5112                 switch (cr) {
5113                 case 3:
5114                         if (vmcs12->cpu_based_vm_exec_control &
5115                             CPU_BASED_CR3_STORE_EXITING)
5116                                 return true;
5117                         break;
5118                 case 8:
5119                         if (vmcs12->cpu_based_vm_exec_control &
5120                             CPU_BASED_CR8_STORE_EXITING)
5121                                 return true;
5122                         break;
5123                 }
5124                 break;
5125         case 3: /* lmsw */
5126                 /*
5127                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5128                  * cr0. Other attempted changes are ignored, with no exit.
5129                  */
5130                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5131                 if (vmcs12->cr0_guest_host_mask & 0xe &
5132                     (val ^ vmcs12->cr0_read_shadow))
5133                         return true;
5134                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5135                     !(vmcs12->cr0_read_shadow & 0x1) &&
5136                     (val & 0x1))
5137                         return true;
5138                 break;
5139         }
5140         return false;
5141 }
5142
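/*
 * Returns true if a VMREAD or VMWRITE executed in L2 should cause a VM
 * exit to L1: always when vmcs12 does not enable shadow VMCS, otherwise
 * when the bit for the accessed field is set in the given vmread or
 * vmwrite bitmap.
 */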
5143 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5144         struct vmcs12 *vmcs12, gpa_t bitmap)
5145 {
5146         u32 vmx_instruction_info;
5147         unsigned long field;
5148         u8 b;
5149
5150         if (!nested_cpu_has_shadow_vmcs(vmcs12))
5151                 return true;
5152
5153         /* Decode instruction info and find the field to access */
5154         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5155         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5156
5157         /* Out-of-range fields always cause a VM exit from L2 to L1 */
5158         if (field >> 15)
5159                 return true;
5160
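             /*
              * Consult L1's VMREAD/VMWRITE bitmap: bit N covers field N, and
              * a set bit (or an unreadable bitmap) reflects the access to L1.
              */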
5161         if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5162                 return true;
5163
5164         return 1 & (b >> (field & 7));
5165 }
5166
5167 /*
5168  * Return true if we should exit from L2 to L1 to handle an exit, or false
5169  * if we should handle it ourselves in L0 (and then continue running L2).
5170  * Only call this when the vcpu is in guest mode (L2).
5171  */
5172 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
5173 {
5174         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5175         struct vcpu_vmx *vmx = to_vmx(vcpu);
5176         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5177
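             /*
              * Exits that arrive while the VMLAUNCH/VMRESUME of vmcs12 is
              * still pending are always handled by L0; they must not be
              * reflected to L1 before that entry has completed.
              */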
5178         if (vmx->nested.nested_run_pending)
5179                 return false;
5180
5181         if (unlikely(vmx->fail)) {
5182                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
5183                                     vmcs_read32(VM_INSTRUCTION_ERROR));
5184                 return true;
5185         }
5186
5187         /*
5188          * The host physical addresses of some pages of guest memory
5189          * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5190          * Page). The CPU may write to these pages via their host
5191          * physical address while L2 is running, bypassing any
5192          * address-translation-based dirty tracking (e.g. EPT write
5193          * protection).
5194          *
5195          * Mark them dirty on every exit from L2 to prevent them from
5196          * getting out of sync with dirty tracking.
5197          */
5198         nested_mark_vmcs12_pages_dirty(vcpu);
5199
5200         trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5201                                 vmcs_readl(EXIT_QUALIFICATION),
5202                                 vmx->idt_vectoring_info,
5203                                 intr_info,
5204                                 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5205                                 KVM_ISA_VMX);
5206
5207         switch (exit_reason) {
5208         case EXIT_REASON_EXCEPTION_NMI:
5209                 if (is_nmi(intr_info))
5210                         return false;
5211                 else if (is_page_fault(intr_info))
5212                         return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5213                 else if (is_debug(intr_info) &&
5214                          vcpu->guest_debug &
5215                          (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5216                         return false;
5217                 else if (is_breakpoint(intr_info) &&
5218                          vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5219                         return false;
5220                 return vmcs12->exception_bitmap &
5221                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5222         case EXIT_REASON_EXTERNAL_INTERRUPT:
5223                 return false;
5224         case EXIT_REASON_TRIPLE_FAULT:
5225                 return true;
5226         case EXIT_REASON_PENDING_INTERRUPT:
5227                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5228         case EXIT_REASON_NMI_WINDOW:
5229                 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5230         case EXIT_REASON_TASK_SWITCH:
5231                 return true;
5232         case EXIT_REASON_CPUID:
5233                 return true;
5234         case EXIT_REASON_HLT:
5235                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5236         case EXIT_REASON_INVD:
5237                 return true;
5238         case EXIT_REASON_INVLPG:
5239                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5240         case EXIT_REASON_RDPMC:
5241                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5242         case EXIT_REASON_RDRAND:
5243                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5244         case EXIT_REASON_RDSEED:
5245                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5246         case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5247                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5248         case EXIT_REASON_VMREAD:
5249                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5250                         vmcs12->vmread_bitmap);
5251         case EXIT_REASON_VMWRITE:
5252                 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5253                         vmcs12->vmwrite_bitmap);
5254         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5255         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5256         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5257         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5258         case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5259                 /*
5260                  * VMX instructions trap unconditionally. This allows L1 to
5261                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
5262                  */
5263                 return true;
5264         case EXIT_REASON_CR_ACCESS:
5265                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5266         case EXIT_REASON_DR_ACCESS:
5267                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5268         case EXIT_REASON_IO_INSTRUCTION:
5269                 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5270         case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5271                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5272         case EXIT_REASON_MSR_READ:
5273         case EXIT_REASON_MSR_WRITE:
5274                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5275         case EXIT_REASON_INVALID_STATE:
5276                 return true;
5277         case EXIT_REASON_MWAIT_INSTRUCTION:
5278                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5279         case EXIT_REASON_MONITOR_TRAP_FLAG:
5280                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5281         case EXIT_REASON_MONITOR_INSTRUCTION:
5282                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5283         case EXIT_REASON_PAUSE_INSTRUCTION:
5284                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5285                         nested_cpu_has2(vmcs12,
5286                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5287         case EXIT_REASON_MCE_DURING_VMENTRY:
5288                 return false;
5289         case EXIT_REASON_TPR_BELOW_THRESHOLD:
5290                 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5291         case EXIT_REASON_APIC_ACCESS:
5292         case EXIT_REASON_APIC_WRITE:
5293         case EXIT_REASON_EOI_INDUCED:
5294                 /*
5295                  * The controls for "virtualize APIC accesses," "APIC-
5296                  * register virtualization," and "virtual-interrupt
5297                  * delivery" only come from vmcs12.
5298                  */
5299                 return true;
5300         case EXIT_REASON_EPT_VIOLATION:
5301                 /*
5302                  * L0 always deals with the EPT violation. If nested EPT is
5303                  * used, and the nested mmu code discovers that the address is
5304                  * missing in the guest EPT table (EPT12), the EPT violation
5305                  * will be injected with nested_ept_inject_page_fault().
5306                  */
5307                 return false;
5308         case EXIT_REASON_EPT_MISCONFIG:
5309                 /*
5310                  * L2 never directly uses L1's EPT, but rather L0's own EPT
5311                  * table (shadow on EPT) or a merged EPT table that L0 built
5312                  * (EPT on EPT). So any problem with the structure of the
5313                  * table is L0's fault.
5314                  */
5315                 return false;
5316         case EXIT_REASON_INVPCID:
5317                 return
5318                         nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5319                         nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5320         case EXIT_REASON_WBINVD:
5321                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5322         case EXIT_REASON_XSETBV:
5323                 return true;
5324         case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5325                 /*
5326                  * This should never happen, since it is not possible to
5327                  * set XSS to a non-zero value---neither in L1 nor in L2.
5328                  * If it were, XSS would have to be checked against
5329                  * the XSS exit bitmap in vmcs12.
5330                  */
5331                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5332         case EXIT_REASON_PREEMPTION_TIMER:
5333                 return false;
5334         case EXIT_REASON_PML_FULL:
5335                 /* We emulate PML support to L1. */
5336                 return false;
5337         case EXIT_REASON_VMFUNC:
5338                 /* VM functions are emulated through L2->L0 vmexits. */
5339                 return false;
5340         case EXIT_REASON_ENCLS:
5341                 /* SGX is never exposed to L1 */
5342                 return false;
5343         default:
5344                 return true;
5345         }
5346 }
5347
5348
5349 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5350                                 struct kvm_nested_state __user *user_kvm_nested_state,
5351                                 u32 user_data_size)
5352 {
5353         struct vcpu_vmx *vmx;
5354         struct vmcs12 *vmcs12;
5355         struct kvm_nested_state kvm_state = {
5356                 .flags = 0,
5357                 .format = 0,
5358                 .size = sizeof(kvm_state),
5359                 .vmx.vmxon_pa = -1ull,
5360                 .vmx.vmcs_pa = -1ull,
5361         };
5362
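             /*
              * A NULL vcpu is a query for the maximum state size: the header
              * plus room for vmcs12 and a shadow vmcs12.
              */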
5363         if (!vcpu)
5364                 return kvm_state.size + 2 * VMCS12_SIZE;
5365
5366         vmx = to_vmx(vcpu);
5367         vmcs12 = get_vmcs12(vcpu);
5368
5369         if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5370                 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5371
5372         if (nested_vmx_allowed(vcpu) &&
5373             (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5374                 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5375                 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
5376
5377                 if (vmx_has_valid_vmcs12(vcpu)) {
5378                         kvm_state.size += VMCS12_SIZE;
5379
5380                         if (is_guest_mode(vcpu) &&
5381                             nested_cpu_has_shadow_vmcs(vmcs12) &&
5382                             vmcs12->vmcs_link_pointer != -1ull)
5383                                 kvm_state.size += VMCS12_SIZE;
5384                 }
5385
5386                 if (vmx->nested.smm.vmxon)
5387                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5388
5389                 if (vmx->nested.smm.guest_mode)
5390                         kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5391
5392                 if (is_guest_mode(vcpu)) {
5393                         kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5394
5395                         if (vmx->nested.nested_run_pending)
5396                                 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5397                 }
5398         }
5399
5400         if (user_data_size < kvm_state.size)
5401                 goto out;
5402
5403         if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5404                 return -EFAULT;
5405
5406         if (!vmx_has_valid_vmcs12(vcpu))
5407                 goto out;
5408
5409         /*
5410          * When running L2, the authoritative vmcs12 state is in the
5411          * vmcs02. When running L1, the authoritative vmcs12 state is
5412          * in the shadow or enlightened vmcs linked to vmcs01, unless
5413          * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
5414          * vmcs12 state is in the vmcs12 already.
5415          */
5416         if (is_guest_mode(vcpu)) {
5417                 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
5418                 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5419         } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
5420                 if (vmx->nested.hv_evmcs)
5421                         copy_enlightened_to_vmcs12(vmx);
5422                 else if (enable_shadow_vmcs)
5423                         copy_shadow_to_vmcs12(vmx);
5424         }
5425
5426         /*
5427          * Copy over the full allocated size of vmcs12 rather than just the size
5428          * of the struct.
5429          */
5430         if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
5431                 return -EFAULT;
5432
5433         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5434             vmcs12->vmcs_link_pointer != -1ull) {
5435                 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5436                                  get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5437                         return -EFAULT;
5438         }
5439
5440 out:
5441         return kvm_state.size;
5442 }
5443
5444 /*
5445  * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5446  */
5447 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5448 {
5449         if (is_guest_mode(vcpu)) {
5450                 to_vmx(vcpu)->nested.nested_run_pending = 0;
5451                 nested_vmx_vmexit(vcpu, -1, 0, 0);
5452         }
5453         free_nested(vcpu);
5454 }
5455
5456 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5457                                 struct kvm_nested_state __user *user_kvm_nested_state,
5458                                 struct kvm_nested_state *kvm_state)
5459 {
5460         struct vcpu_vmx *vmx = to_vmx(vcpu);
5461         struct vmcs12 *vmcs12;
5462         u32 exit_qual;
5463         int ret;
5464
5465         if (kvm_state->format != 0)
5466                 return -EINVAL;
5467
5468         if (!nested_vmx_allowed(vcpu))
5469                 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
5470
5471         if (kvm_state->vmx.vmxon_pa == -1ull) {
5472                 if (kvm_state->vmx.smm.flags)
5473                         return -EINVAL;
5474
5475                 if (kvm_state->vmx.vmcs_pa != -1ull)
5476                         return -EINVAL;
5477
5478                 vmx_leave_nested(vcpu);
5479                 return 0;
5480         }
5481
5482         if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
5483                 return -EINVAL;
5484
5485         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5486             (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5487                 return -EINVAL;
5488
5489         if (kvm_state->vmx.smm.flags &
5490             ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5491                 return -EINVAL;
5492
5493         /*
5494          * SMM temporarily disables VMX, so we cannot be in guest mode,
5495          * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
5496          * must be zero.
5497          */
5498         if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
5499                 return -EINVAL;
5500
5501         if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5502             !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5503                 return -EINVAL;
5504
5505         vmx_leave_nested(vcpu);
5506         if (kvm_state->vmx.vmxon_pa == -1ull)
5507                 return 0;
5508
5509         if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
5510                 nested_enable_evmcs(vcpu, NULL);
5511
5512         vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
5513         ret = enter_vmx_operation(vcpu);
5514         if (ret)
5515                 return ret;
5516
5517         /* Empty 'VMXON' state is permitted */
5518         if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
5519                 return 0;
5520
5521         if (kvm_state->vmx.vmcs_pa != -1ull) {
5522                 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
5523                     !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
5524                         return -EINVAL;
5525
5526                 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
5527         } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5528                 /*
5529                  * Sync eVMCS upon entry as we may not have
5530                  * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5531                  */
5532                 vmx->nested.need_vmcs12_to_shadow_sync = true;
5533         } else {
5534                 return -EINVAL;
5535         }
5536
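             /*
              * VMXON (and guest mode) that were in force before entering SMM
              * are tracked in nested.smm while the vCPU stays in SMM.
              */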
5537         if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5538                 vmx->nested.smm.vmxon = true;
5539                 vmx->nested.vmxon = false;
5540
5541                 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5542                         vmx->nested.smm.guest_mode = true;
5543         }
5544
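             /*
              * The vmcs12 image sits at the start of the variable-size data
              * area that follows the header; a shadow vmcs12, if any, follows
              * at offset VMCS12_SIZE.
              */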
5545         vmcs12 = get_vmcs12(vcpu);
5546         if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
5547                 return -EFAULT;
5548
5549         if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5550                 return -EINVAL;
5551
5552         if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5553                 return 0;
5554
5555         vmx->nested.nested_run_pending =
5556                 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5557
5558         ret = -EINVAL;
5559         if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5560             vmcs12->vmcs_link_pointer != -1ull) {
5561                 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5562
5563                 if (kvm_state->size < sizeof(*kvm_state) + VMCS12_SIZE + sizeof(*vmcs12))
5564                         goto error_guest_mode;
5565
5566                 if (copy_from_user(shadow_vmcs12,
5567                                    user_kvm_nested_state->data + VMCS12_SIZE,
5568                                    sizeof(*vmcs12))) {
5569                         ret = -EFAULT;
5570                         goto error_guest_mode;
5571                 }
5572
5573                 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5574                     !shadow_vmcs12->hdr.shadow_vmcs)
5575                         goto error_guest_mode;
5576         }
5577
5578         if (nested_vmx_check_controls(vcpu, vmcs12) ||
5579             nested_vmx_check_host_state(vcpu, vmcs12) ||
5580             nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
5581                 goto error_guest_mode;
5582
5583         vmx->nested.dirty_vmcs12 = true;
5584         ret = nested_vmx_enter_non_root_mode(vcpu, false);
5585         if (ret)
5586                 goto error_guest_mode;
5587
5588         return 0;
5589
5590 error_guest_mode:
5591         vmx->nested.nested_run_pending = 0;
5592         return ret;
5593 }
5594
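     /*
      * Per-vCPU setup for nesting: point the current VMCS at the global
      * VMREAD/VMWRITE bitmaps allocated in nested_vmx_hardware_setup().
      */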
5595 void nested_vmx_vcpu_setup(void)
5596 {
5597         if (enable_shadow_vmcs) {
5598                 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
5599                 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
5600         }
5601 }
5602
5603 /*
5604  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5605  * returned for the various VMX controls MSRs when nested VMX is enabled.
5606  * The same values should also be used to verify that vmcs12 control fields are
5607  * valid during nested entry from L1 to L2.
5608  * Each of these control MSRs has a low and high 32-bit half: a low bit is on
5609  * if the corresponding bit in the (32-bit) control field *must* be on, and a
5610  * bit in the high half is on if the corresponding bit in the control field
5611  * may be on. See also vmx_control_verify().
5612  */
5613 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5614                                 bool apicv)
5615 {
5616         /*
5617          * Note that as a general rule, the high half of the MSRs (bits in
5618          * the control fields which may be 1) should be initialized by the
5619          * intersection of the underlying hardware's MSR (i.e., features which
5620          * can be supported) and the list of features we want to expose -
5621          * because they are known to be properly supported in our code.
5622          * Also, usually, the low half of the MSRs (bits which must be 1) can
5623          * be set to 0, meaning that L1 may turn off any of these bits. The
5624          * reason is that if one of these bits is necessary, it will appear
5625          * in vmcs01, and prepare_vmcs02, which bitwise-ors the control
5626          * fields of vmcs01 and vmcs12, will keep these bits set - and
5627          * nested_vmx_exit_reflected() will not pass related exits to L1.
5628          * These rules have exceptions below.
5629          */
5630
5631         /* pin-based controls */
5632         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5633                 msrs->pinbased_ctls_low,
5634                 msrs->pinbased_ctls_high);
5635         msrs->pinbased_ctls_low |=
5636                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5637         msrs->pinbased_ctls_high &=
5638                 PIN_BASED_EXT_INTR_MASK |
5639                 PIN_BASED_NMI_EXITING |
5640                 PIN_BASED_VIRTUAL_NMIS |
5641                 (apicv ? PIN_BASED_POSTED_INTR : 0);
5642         msrs->pinbased_ctls_high |=
5643                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5644                 PIN_BASED_VMX_PREEMPTION_TIMER;
5645
5646         /* exit controls */
5647         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5648                 msrs->exit_ctls_low,
5649                 msrs->exit_ctls_high);
5650         msrs->exit_ctls_low =
5651                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5652
5653         msrs->exit_ctls_high &=
5654 #ifdef CONFIG_X86_64
5655                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5656 #endif
5657                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5658         msrs->exit_ctls_high |=
5659                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5660                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5661                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5662
5663         /* We support free control of debug control saving. */
5664         msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5665
5666         /* entry controls */
5667         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5668                 msrs->entry_ctls_low,
5669                 msrs->entry_ctls_high);
5670         msrs->entry_ctls_low =
5671                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5672         msrs->entry_ctls_high &=
5673 #ifdef CONFIG_X86_64
5674                 VM_ENTRY_IA32E_MODE |
5675 #endif
5676                 VM_ENTRY_LOAD_IA32_PAT;
5677         msrs->entry_ctls_high |=
5678                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5679
5680         /* We support free control of debug control loading. */
5681         msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5682
5683         /* cpu-based controls */
5684         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5685                 msrs->procbased_ctls_low,
5686                 msrs->procbased_ctls_high);
5687         msrs->procbased_ctls_low =
5688                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5689         msrs->procbased_ctls_high &=
5690                 CPU_BASED_VIRTUAL_INTR_PENDING |
5691                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5692                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5693                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5694                 CPU_BASED_CR3_STORE_EXITING |
5695 #ifdef CONFIG_X86_64
5696                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5697 #endif
5698                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5699                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5700                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5701                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5702                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5703         /*
5704          * We can allow some features even when not supported by the
5705          * hardware. For example, L1 can specify an MSR bitmap - and we
5706          * can use it to avoid exits to L1 - even when L0 runs L2
5707          * without MSR bitmaps.
5708          */
5709         msrs->procbased_ctls_high |=
5710                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5711                 CPU_BASED_USE_MSR_BITMAPS;
5712
5713         /* We support free control of CR3 access interception. */
5714         msrs->procbased_ctls_low &=
5715                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5716
5717         /*
5718          * secondary cpu-based controls.  Do not include those that
5719          * depend on CPUID bits, they are added later by vmx_cpuid_update.
5720          */
5721         if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
5722                 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5723                       msrs->secondary_ctls_low,
5724                       msrs->secondary_ctls_high);
5725
5726         msrs->secondary_ctls_low = 0;
5727         msrs->secondary_ctls_high &=
5728                 SECONDARY_EXEC_DESC |
5729                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5730                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5731                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5732                 SECONDARY_EXEC_WBINVD_EXITING;
5733
5734         /*
5735          * We can emulate "VMCS shadowing," even if the hardware
5736          * doesn't support it.
5737          */
5738         msrs->secondary_ctls_high |=
5739                 SECONDARY_EXEC_SHADOW_VMCS;
5740
5741         if (enable_ept) {
5742                 /* nested EPT: emulate EPT also to L1 */
5743                 msrs->secondary_ctls_high |=
5744                         SECONDARY_EXEC_ENABLE_EPT;
5745                 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5746                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5747                 if (cpu_has_vmx_ept_execute_only())
5748                         msrs->ept_caps |=
5749                                 VMX_EPT_EXECUTE_ONLY_BIT;
5750                 msrs->ept_caps &= ept_caps;
5751                 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5752                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5753                         VMX_EPT_1GB_PAGE_BIT;
5754                 if (enable_ept_ad_bits) {
5755                         msrs->secondary_ctls_high |=
5756                                 SECONDARY_EXEC_ENABLE_PML;
5757                         msrs->ept_caps |= VMX_EPT_AD_BIT;
5758                 }
5759         }
5760
5761         if (cpu_has_vmx_vmfunc()) {
5762                 msrs->secondary_ctls_high |=
5763                         SECONDARY_EXEC_ENABLE_VMFUNC;
5764                 /*
5765                  * Advertise EPTP switching unconditionally
5766                  * since we emulate it
5767                  */
5768                 if (enable_ept)
5769                         msrs->vmfunc_controls =
5770                                 VMX_VMFUNC_EPTP_SWITCHING;
5771         }
5772
5773         /*
5774          * Old versions of KVM use the single-context version without
5775          * checking for support, so declare that it is supported even
5776          * though it is treated as global context.  The alternative would be
5777          * to fail the single-context invvpid, which is worse.
5778          */
5779         if (enable_vpid) {
5780                 msrs->secondary_ctls_high |=
5781                         SECONDARY_EXEC_ENABLE_VPID;
5782                 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5783                         VMX_VPID_EXTENT_SUPPORTED_MASK;
5784         }
5785
5786         if (enable_unrestricted_guest)
5787                 msrs->secondary_ctls_high |=
5788                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
5789
5790         if (flexpriority_enabled)
5791                 msrs->secondary_ctls_high |=
5792                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5793
5794         /* miscellaneous data */
5795         rdmsr(MSR_IA32_VMX_MISC,
5796                 msrs->misc_low,
5797                 msrs->misc_high);
5798         msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5799         msrs->misc_low |=
5800                 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5801                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5802                 VMX_MISC_ACTIVITY_HLT;
5803         msrs->misc_high = 0;
5804
5805         /*
5806          * This MSR reports some information about VMX support. We
5807          * should return information about the VMX we emulate for the
5808          * guest, and the VMCS structure we give it - not about the
5809          * VMX support of the underlying hardware.
5810          */
5811         msrs->basic =
5812                 VMCS12_REVISION |
5813                 VMX_BASIC_TRUE_CTLS |
5814                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
5815                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
5816
5817         if (cpu_has_vmx_basic_inout())
5818                 msrs->basic |= VMX_BASIC_INOUT;
5819
5820         /*
5821          * These MSRs specify bits which the guest must keep fixed on
5822          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
5823          * We picked the standard core2 setting.
5824          */
5825 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
5826 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
5827         msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
5828         msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
5829
5830         /* These MSRs specify bits which the guest must keep fixed off. */
5831         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
5832         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
5833
5834         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
5835         msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
5836 }
5837
5838 void nested_vmx_hardware_unsetup(void)
5839 {
5840         int i;
5841
5842         if (enable_shadow_vmcs) {
5843                 for (i = 0; i < VMX_BITMAP_NR; i++)
5844                         free_page((unsigned long)vmx_bitmap[i]);
5845         }
5846 }
5847
5848 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
5849 {
5850         int i;
5851
5852         /*
5853          * Without EPT it is not possible to restore L1's CR3 and PDPTR on
5854          * VMfail, because they are not available in vmcs01.  Just always
5855          * use hardware checks.
5856          */
5857         if (!enable_ept)
5858                 nested_early_check = 1;
5859
5860         if (!cpu_has_vmx_shadow_vmcs())
5861                 enable_shadow_vmcs = 0;
5862         if (enable_shadow_vmcs) {
5863                 for (i = 0; i < VMX_BITMAP_NR; i++) {
5864                         /*
5865                          * The vmx_bitmap is not tied to a VM and so should
5866                          * not be charged to a memcg.
5867                          */
5868                         vmx_bitmap[i] = (unsigned long *)
5869                                 __get_free_page(GFP_KERNEL);
5870                         if (!vmx_bitmap[i]) {
5871                                 nested_vmx_hardware_unsetup();
5872                                 return -ENOMEM;
5873                         }
5874                 }
5875
5876                 init_vmcs_shadow_fields();
5877         }
5878
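             /*
              * Install handlers for the VMX instructions so that L0 emulates
              * them on behalf of L1.
              */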
5879         exit_handlers[EXIT_REASON_VMCLEAR]      = handle_vmclear;
5880         exit_handlers[EXIT_REASON_VMLAUNCH]     = handle_vmlaunch;
5881         exit_handlers[EXIT_REASON_VMPTRLD]      = handle_vmptrld;
5882         exit_handlers[EXIT_REASON_VMPTRST]      = handle_vmptrst;
5883         exit_handlers[EXIT_REASON_VMREAD]       = handle_vmread;
5884         exit_handlers[EXIT_REASON_VMRESUME]     = handle_vmresume;
5885         exit_handlers[EXIT_REASON_VMWRITE]      = handle_vmwrite;
5886         exit_handlers[EXIT_REASON_VMOFF]        = handle_vmoff;
5887         exit_handlers[EXIT_REASON_VMON]         = handle_vmon;
5888         exit_handlers[EXIT_REASON_INVEPT]       = handle_invept;
5889         exit_handlers[EXIT_REASON_INVVPID]      = handle_invvpid;
5890         exit_handlers[EXIT_REASON_VMFUNC]       = handle_vmfunc;
5891
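             /*
              * Hook the nested-specific callbacks into kvm_x86_ops so the
              * generic x86 code can drive nested state save/restore, vmcs12
              * page mapping and nested event checks.
              */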
5892         kvm_x86_ops->check_nested_events = vmx_check_nested_events;
5893         kvm_x86_ops->get_nested_state = vmx_get_nested_state;
5894         kvm_x86_ops->set_nested_state = vmx_set_nested_state;
5895         kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
5896         kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
5897         kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
5898
5899         return 0;
5900 }