4 * Copyright (C) 2018, Red Hat, Inc.
6 * This work is licensed under the terms of the GNU GPL, version 2.
8 * Tests for vCPU state save/restore, including nested guest state.
10 #define _GNU_SOURCE /* for program_invocation_short_name */
15 #include <sys/ioctl.h>
17 #include "test_util.h"
/*
 * I/O ports the guest hits to exit to the host (L0).  The port number
 * selects the message type; payload travels in rdi/rsi (see __exit_to_l0).
 */
#define PORT_SYNC 0x1000	/* sync point: rsi carries the stage number */
#define PORT_ABORT 0x1001	/* guest assertion failed: rdi = message, rsi = line */
#define PORT_DONE 0x1002	/* guest finished successfully */
/*
 * Trap out to the host with a port-I/O read.  An "in" instruction causes a
 * KVM_EXIT_IO to userspace; the host-side loop in main() then inspects the
 * vCPU's rdi/rsi (bound via the "D"/"S" asm constraints below) to recover
 * arg0/arg1.  The byte read into %al is ignored.
 */
static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
	__asm__ __volatile__("in %[port], %%al"
		: [port]"d"(port), "D"(arg0), "S"(arg1)

/* Convenience wrapper: coerces both payload arguments to uint64_t. */
#define exit_to_l0(_port, _arg0, _arg1) \
	__exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
/*
 * Guest-side assert: on failure, exit to the host with the stringified
 * condition in rdi and the guest __LINE__ in rsi; the host converts this
 * into a TEST_ASSERT failure (PORT_ABORT handling in main()).
 */
#define GUEST_ASSERT(_condition) do { \
	exit_to_l0(PORT_ABORT, "Failed guest assert: " #_condition, __LINE__);\

/*
 * Report a sync point: rdi = "hello" (sanity marker the host strcmp()s),
 * rsi = stage number.
 * NOTE(review): the expansion ends in ';', so "GUEST_SYNC(1);" at a call
 * site yields a double semicolon and the macro is unsafe in an unbraced
 * if/else — consider dropping the trailing semicolon (verify call sites).
 */
#define GUEST_SYNC(stage) \
	exit_to_l0(PORT_SYNC, "hello", stage);

/*
 * NOTE(review): assignment site not visible in this view — presumably set
 * when KVM_CAP_NESTED_STATE is available and nested state is exercised.
 */
static bool have_nested_state;
/*
 * L2 (nested) guest body.  Most of it is elided from this view; it ends by
 * exiting to L1 permanently (presumably via vmcall — TODO confirm).
 */
void l2_guest_code(void)
	/* Done, exit to L1 and never come back. */
/*
 * L1 guest: enters VMX operation, launches L2, and asserts at every step
 * that VMX state (current-VMCS pointer, launched state) survives the
 * host-side save/restore that happens at each sync point.  Several sync
 * calls and the vmptrld step are elided from this view.
 */
void l1_guest_code(struct vmx_pages *vmx_pages)
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	/* Current-VMCS pointer must match the VMCS we set up. */
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	/* ... and must still match after the (elided) sync with the host. */
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved. */
	GUEST_ASSERT(vmlaunch());	/* must FAIL: VMCS already launched */

	GUEST_ASSERT(!vmresume());	/* resume into L2 works instead */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Advance L2's RIP past the exiting instruction (3-byte vmcall). */
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
/*
 * L1 entry point.  Runs the nested-state portion only when the host
 * passed in VMX pages (the surrounding condition is elided from this
 * view), then signals completion to the host.
 */
void guest_code(struct vmx_pages *vmx_pages)
		l1_guest_code(vmx_pages);

	exit_to_l0(PORT_DONE, 0, 0);
/*
 * Host-side driver: create a VM, run the guest stage by stage, and at each
 * sync point save the full vCPU state, tear the VM down, rebuild it, and
 * restore the state — asserting the guest-visible registers are unchanged.
 * (This view is truncated: the loop/function close after the last visible
 * line.)
 */
int main(int argc, char *argv[])
	struct vmx_pages *vmx_pages = NULL;
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_x86_state *state;
	/* NOTE(review): 'entry' looks unused in the visible code — confirm. */
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	/* Create the VM with guest_code as the vCPU entry point. */
	vm = vm_create_default(VCPU_ID, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	/*
	 * NOTE(review): every "®s1"/"®s2" in this function is mojibake
	 * for "&regs1"/"&regs2" (the "&reg" prefix was decoded as the HTML
	 * entity &reg;) — repair the file's encoding; as written this does
	 * not compile.
	 */
	vcpu_regs_get(vm, VCPU_ID, ®s1);

	/* Exercise nested (VMX) state only when the kernel supports it. */
	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
		/* (else branch — NULL vmx_pages disables the nested test) */
		printf("will skip nested state checks\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);

	/* One iteration per guest sync point, until PORT_DONE (elided). */
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s),\n",
			    exit_reason_str(run->exit_reason));

		/* Snapshot guest registers; rdi/rsi carry the exit payload. */
		memset(®s1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, ®s1);
		switch (run->io.port) {
			/* PORT_ABORT: rdi = guest message, rsi = guest line. */
			TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi,
				    __FILE__, regs1.rsi);
			TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);

		/* PORT_SYNC is handled here. */
		/*
		 * NOTE(review): "%lx" is used for 'stage', whose declaration
		 * is not visible here — if stage is an int this is a format
		 * mismatch (UB); confirm and cast or fix the specifier.
		 */
		TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") &&
			    regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx",
			    stage, (ulong) regs1.rsi);

		state = vcpu_save_state(vm, VCPU_ID);

		/* Restore state in a new VM. */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);

		/* Registers must survive save/restore bit-for-bit. */
		memset(®s2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, ®s2);
		TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);