1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14
15 #define pr_fmt(fmt) "SVM: " fmt
16
17 #include <linux/kvm_host.h>
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22 #include "x86.h"
23 #include "cpuid.h"
24 #include "pmu.h"
25
26 #include <linux/module.h>
27 #include <linux/mod_devicetable.h>
28 #include <linux/kernel.h>
29 #include <linux/vmalloc.h>
30 #include <linux/highmem.h>
31 #include <linux/sched.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/amd-iommu.h>
35 #include <linux/hashtable.h>
36 #include <linux/frame.h>
37 #include <linux/psp-sev.h>
38 #include <linux/file.h>
39 #include <linux/pagemap.h>
40 #include <linux/swap.h>
41
42 #include <asm/apic.h>
43 #include <asm/perf_event.h>
44 #include <asm/tlbflush.h>
45 #include <asm/desc.h>
46 #include <asm/debugreg.h>
47 #include <asm/kvm_para.h>
48 #include <asm/irq_remapping.h>
49 #include <asm/spec-ctrl.h>
50
51 #include <asm/virtext.h>
52 #include "trace.h"
53
54 #define __ex(x) __kvm_handle_fault_on_reboot(x)
55
56 MODULE_AUTHOR("Qumranet");
57 MODULE_LICENSE("GPL");
58
59 static const struct x86_cpu_id svm_cpu_id[] = {
60         X86_FEATURE_MATCH(X86_FEATURE_SVM),
61         {}
62 };
63 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
64
65 #define IOPM_ALLOC_ORDER 2
66 #define MSRPM_ALLOC_ORDER 1
67
68 #define SEG_TYPE_LDT 2
69 #define SEG_TYPE_BUSY_TSS16 3
70
71 #define SVM_FEATURE_LBRV           (1 <<  1)
72 #define SVM_FEATURE_SVML           (1 <<  2)
73 #define SVM_FEATURE_TSC_RATE       (1 <<  4)
74 #define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
75 #define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
76 #define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
77 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
78
79 #define SVM_AVIC_DOORBELL       0xc001011b
80
81 #define NESTED_EXIT_HOST        0       /* Exit handled on host level */
82 #define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
83 #define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
84
85 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
86
87 #define TSC_RATIO_RSVD          0xffffff0000000000ULL
88 #define TSC_RATIO_MIN           0x0000000000000001ULL
89 #define TSC_RATIO_MAX           0x000000ffffffffffULL
90
91 #define AVIC_HPA_MASK   ~((0xFFFULL << 52) | 0xFFF)
92
93 /*
94  * 0xff is broadcast, so the max index allowed for physical APIC ID
95  * table is 0xfe.  APIC IDs above 0xff are reserved.
96  */
97 #define AVIC_MAX_PHYSICAL_ID_COUNT      255
98
99 #define AVIC_UNACCEL_ACCESS_WRITE_MASK          1
100 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK         0xFF0
101 #define AVIC_UNACCEL_ACCESS_VECTOR_MASK         0xFFFFFFFF
102
103 /* AVIC GATAG is encoded using VM and VCPU IDs */
104 #define AVIC_VCPU_ID_BITS               8
105 #define AVIC_VCPU_ID_MASK               ((1 << AVIC_VCPU_ID_BITS) - 1)
106
107 #define AVIC_VM_ID_BITS                 24
108 #define AVIC_VM_ID_NR                   (1 << AVIC_VM_ID_BITS)
109 #define AVIC_VM_ID_MASK                 ((1 << AVIC_VM_ID_BITS) - 1)
110
111 #define AVIC_GATAG(x, y)                (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
112                                                 (y & AVIC_VCPU_ID_MASK))
113 #define AVIC_GATAG_TO_VMID(x)           ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
114 #define AVIC_GATAG_TO_VCPUID(x)         (x & AVIC_VCPU_ID_MASK)
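/*
 * For example, AVIC_GATAG(0x12345, 7) yields 0x01234507: the low 24 bits
 * of the VM ID shifted left by AVIC_VCPU_ID_BITS, OR'd with the low 8
 * bits of the vCPU ID.  AVIC_GATAG_TO_VMID() and AVIC_GATAG_TO_VCPUID()
 * recover 0x12345 and 7 from that tag.
 */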
115
116 static bool erratum_383_found __read_mostly;
117
118 static const u32 host_save_user_msrs[] = {
119 #ifdef CONFIG_X86_64
120         MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
121         MSR_FS_BASE,
122 #endif
123         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
124         MSR_TSC_AUX,
125 };
126
127 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
128
129 struct kvm_sev_info {
130         bool active;            /* SEV enabled guest */
131         unsigned int asid;      /* ASID used for this guest */
132         unsigned int handle;    /* SEV firmware handle */
133         int fd;                 /* SEV device fd */
134         unsigned long pages_locked; /* Number of pages locked */
135         struct list_head regions_list;  /* List of registered regions */
136 };
137
138 struct kvm_svm {
139         struct kvm kvm;
140
141         /* Struct members for AVIC */
142         u32 avic_vm_id;
143         struct page *avic_logical_id_table_page;
144         struct page *avic_physical_id_table_page;
145         struct hlist_node hnode;
146
147         struct kvm_sev_info sev_info;
148 };
149
150 struct kvm_vcpu;
151
152 struct nested_state {
153         struct vmcb *hsave;
154         u64 hsave_msr;
155         u64 vm_cr_msr;
156         u64 vmcb;
157
158         /* These are the merged vectors */
159         u32 *msrpm;
160
161         /* gpa pointers to the real vectors */
162         u64 vmcb_msrpm;
163         u64 vmcb_iopm;
164
165         /* A VMEXIT is required but not yet emulated */
166         bool exit_required;
167
168         /* cache for intercepts of the guest */
169         u32 intercept_cr;
170         u32 intercept_dr;
171         u32 intercept_exceptions;
172         u64 intercept;
173
174         /* Nested Paging related state */
175         u64 nested_cr3;
176 };
177
178 #define MSRPM_OFFSETS   16
179 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
180
181 /*
182  * Set osvw_len to a higher value when updated Revision Guides
183  * are published and we know what the new status bits are
184  */
185 static uint64_t osvw_len = 4, osvw_status;
186
187 struct vcpu_svm {
188         struct kvm_vcpu vcpu;
189         struct vmcb *vmcb;
190         unsigned long vmcb_pa;
191         struct svm_cpu_data *svm_data;
192         uint64_t asid_generation;
193         uint64_t sysenter_esp;
194         uint64_t sysenter_eip;
195         uint64_t tsc_aux;
196
197         u64 msr_decfg;
198
199         u64 next_rip;
200
201         u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
202         struct {
203                 u16 fs;
204                 u16 gs;
205                 u16 ldt;
206                 u64 gs_base;
207         } host;
208
209         u64 spec_ctrl;
210         /*
211          * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
212          * translated into the appropriate L2_CFG bits on the host to
213          * perform speculative control.
214          */
215         u64 virt_spec_ctrl;
216
217         u32 *msrpm;
218
219         ulong nmi_iret_rip;
220
221         struct nested_state nested;
222
223         bool nmi_singlestep;
224         u64 nmi_singlestep_guest_rflags;
225
226         unsigned int3_injected;
227         unsigned long int3_rip;
228
229         /* cached guest cpuid flags for faster access */
230         bool nrips_enabled      : 1;
231
232         u32 ldr_reg;
233         u32 dfr_reg;
234         struct page *avic_backing_page;
235         u64 *avic_physical_id_cache;
236         bool avic_is_running;
237
238         /*
239          * Per-vcpu list of struct amd_svm_iommu_ir:
240          * This is used mainly to store interrupt remapping information used
241          * when updating the vcpu affinity. This avoids the need to scan for
242          * the IRTE and try to match ga_tag in the IOMMU driver.
243          */
244         struct list_head ir_list;
245         spinlock_t ir_list_lock;
246
247         /* which host CPU was used for running this vcpu */
248         unsigned int last_cpu;
249 };
250
251 /*
252  * This is a wrapper of struct amd_iommu_ir_data.
253  */
254 struct amd_svm_iommu_ir {
255         struct list_head node;  /* Used by SVM for per-vcpu ir_list */
256         void *data;             /* Storing pointer to struct amd_ir_data */
257 };
258
259 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK    (0xFF)
260 #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                 31
261 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK                (1 << 31)
262
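/*
 * Each 64-bit entry in the physical APIC ID table encodes the host
 * physical APIC ID in bits 0-7, the vAPIC backing page address in
 * bits 12-51, an is-running flag in bit 62 and a valid flag in bit 63.
 */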
263 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK    (0xFFULL)
264 #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK        (0xFFFFFFFFFFULL << 12)
265 #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK          (1ULL << 62)
266 #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK               (1ULL << 63)
267
268 static DEFINE_PER_CPU(u64, current_tsc_ratio);
269 #define TSC_RATIO_DEFAULT       0x0100000000ULL
270
271 #define MSR_INVALID                     0xffffffffU
272
273 static const struct svm_direct_access_msrs {
274         u32 index;   /* Index of the MSR */
275         bool always; /* True if intercept is always on */
276 } direct_access_msrs[] = {
277         { .index = MSR_STAR,                            .always = true  },
278         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
279 #ifdef CONFIG_X86_64
280         { .index = MSR_GS_BASE,                         .always = true  },
281         { .index = MSR_FS_BASE,                         .always = true  },
282         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
283         { .index = MSR_LSTAR,                           .always = true  },
284         { .index = MSR_CSTAR,                           .always = true  },
285         { .index = MSR_SYSCALL_MASK,                    .always = true  },
286 #endif
287         { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
288         { .index = MSR_IA32_PRED_CMD,                   .always = false },
289         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
290         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
291         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
292         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
293         { .index = MSR_INVALID,                         .always = false },
294 };
295
296 /* enable NPT for AMD64 and X86 with PAE */
297 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
298 static bool npt_enabled = true;
299 #else
300 static bool npt_enabled;
301 #endif
302
303 /*
304  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
305  * pause_filter_count: On processors that support Pause filtering (indicated
306  *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
307  *      count value. On VMRUN this value is loaded into an internal counter.
308  *      Each time a pause instruction is executed, this counter is decremented
309  *      until it reaches zero, at which time a #VMEXIT is generated if pause
310  *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
311  *      Intercept Filtering for more details.
312  *      This also indicates whether PLE logic is enabled.
313  *
314  * pause_filter_thresh: In addition, some processor families support advanced
315  *      pause filtering (indicated by CPUID Fn8000_000A_EDX) with an upper bound
316  *      on the amount of time a guest is allowed to execute in a pause loop.
317  *      In this mode, a 16-bit pause filter threshold field is added in the
318  *      VMCB. The threshold value is a cycle count that is used to reset the
319  *      pause counter. As with simple pause filtering, VMRUN loads the pause
320  *      count value from VMCB into an internal counter. Then, on each pause
321  *      instruction the hardware checks the elapsed number of cycles since
322  *      the most recent pause instruction against the pause filter threshold.
323  *      If the elapsed cycle count is greater than the pause filter threshold,
324  *      then the internal pause count is reloaded from the VMCB and execution
325  *      continues. If the elapsed cycle count is less than the pause filter
326  *      threshold, then the internal pause count is decremented. If the count
327  *      value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
328  *      triggered. If advanced pause filtering is supported and pause filter
329  *      threshold field is set to zero, the filter will operate in the simpler,
330  *      count only mode.
331  */
332
333 static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
334 module_param(pause_filter_thresh, ushort, 0444);
335
336 static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
337 module_param(pause_filter_count, ushort, 0444);
338
339 /* Default doubles per-vcpu window every exit. */
340 static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
341 module_param(pause_filter_count_grow, ushort, 0444);
342
343 /* Default resets per-vcpu window every exit to pause_filter_count. */
344 static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
345 module_param(pause_filter_count_shrink, ushort, 0444);
346
347 /* Default is to compute the maximum so we can never overflow. */
348 static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
349 module_param(pause_filter_count_max, ushort, 0444);
350
351 /* allow nested paging (virtualized MMU) for all guests */
352 static int npt = true;
353 module_param(npt, int, S_IRUGO);
354
355 /* allow nested virtualization in KVM/SVM */
356 static int nested = true;
357 module_param(nested, int, S_IRUGO);
358
359 /* enable / disable AVIC */
360 static int avic;
361 #ifdef CONFIG_X86_LOCAL_APIC
362 module_param(avic, int, S_IRUGO);
363 #endif
364
365 /* enable/disable Next RIP Save */
366 static int nrips = true;
367 module_param(nrips, int, 0444);
368
369 /* enable/disable Virtual VMLOAD VMSAVE */
370 static int vls = true;
371 module_param(vls, int, 0444);
372
373 /* enable/disable Virtual GIF */
374 static int vgif = true;
375 module_param(vgif, int, 0444);
376
377 /* enable/disable SEV support */
378 static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
379 module_param(sev, int, 0444);
380
381 static bool __read_mostly dump_invalid_vmcb = 0;
382 module_param(dump_invalid_vmcb, bool, 0644);
383
384 static u8 rsm_ins_bytes[] = "\x0f\xaa";
385
386 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
387 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
388 static void svm_complete_interrupts(struct vcpu_svm *svm);
389
390 static int nested_svm_exit_handled(struct vcpu_svm *svm);
391 static int nested_svm_intercept(struct vcpu_svm *svm);
392 static int nested_svm_vmexit(struct vcpu_svm *svm);
393 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
394                                       bool has_error_code, u32 error_code);
395
396 enum {
397         VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
398                             pause filter count */
399         VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
400         VMCB_ASID,       /* ASID */
401         VMCB_INTR,       /* int_ctl, int_vector */
402         VMCB_NPT,        /* npt_en, nCR3, gPAT */
403         VMCB_CR,         /* CR0, CR3, CR4, EFER */
404         VMCB_DR,         /* DR6, DR7 */
405         VMCB_DT,         /* GDT, IDT */
406         VMCB_SEG,        /* CS, DS, SS, ES, CPL */
407         VMCB_CR2,        /* CR2 only */
408         VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
409         VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
410                           * AVIC PHYSICAL_TABLE pointer,
411                           * AVIC LOGICAL_TABLE pointer
412                           */
413         VMCB_DIRTY_MAX,
414 };
415
416 /* TPR and CR2 are always written before VMRUN */
417 #define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))
418
419 #define VMCB_AVIC_APIC_BAR_MASK         0xFFFFFFFFFF000ULL
420
421 static unsigned int max_sev_asid;
422 static unsigned int min_sev_asid;
423 static unsigned long *sev_asid_bitmap;
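/* Physical address of a struct page with the SME/SEV encryption bit (C-bit) applied. */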
424 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
425
426 struct enc_region {
427         struct list_head list;
428         unsigned long npages;
429         struct page **pages;
430         unsigned long uaddr;
431         unsigned long size;
432 };
433
434
435 static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
436 {
437         return container_of(kvm, struct kvm_svm, kvm);
438 }
439
440 static inline bool svm_sev_enabled(void)
441 {
442         return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
443 }
444
445 static inline bool sev_guest(struct kvm *kvm)
446 {
447 #ifdef CONFIG_KVM_AMD_SEV
448         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
449
450         return sev->active;
451 #else
452         return false;
453 #endif
454 }
455
456 static inline int sev_get_asid(struct kvm *kvm)
457 {
458         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
459
460         return sev->asid;
461 }
462
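/*
 * VMCB clean bits: a set bit tells hardware that the corresponding VMCB
 * state area (see the VMCB_* enum above) is unchanged and may be used
 * from its internal cache.  mark_dirty() clears a bit to force that area
 * to be reloaded on the next VMRUN; mark_all_dirty() forces a full reload.
 */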
463 static inline void mark_all_dirty(struct vmcb *vmcb)
464 {
465         vmcb->control.clean = 0;
466 }
467
468 static inline void mark_all_clean(struct vmcb *vmcb)
469 {
470         vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
471                                & ~VMCB_ALWAYS_DIRTY_MASK;
472 }
473
474 static inline void mark_dirty(struct vmcb *vmcb, int bit)
475 {
476         vmcb->control.clean &= ~(1 << bit);
477 }
478
479 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
480 {
481         return container_of(vcpu, struct vcpu_svm, vcpu);
482 }
483
484 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
485 {
486         svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
487         mark_dirty(svm->vmcb, VMCB_AVIC);
488 }
489
490 static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
491 {
492         struct vcpu_svm *svm = to_svm(vcpu);
493         u64 *entry = svm->avic_physical_id_cache;
494
495         if (!entry)
496                 return false;
497
498         return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
499 }
500
501 static void recalc_intercepts(struct vcpu_svm *svm)
502 {
503         struct vmcb_control_area *c, *h;
504         struct nested_state *g;
505
506         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
507
508         if (!is_guest_mode(&svm->vcpu))
509                 return;
510
511         c = &svm->vmcb->control;
512         h = &svm->nested.hsave->control;
513         g = &svm->nested;
514
515         c->intercept_cr = h->intercept_cr | g->intercept_cr;
516         c->intercept_dr = h->intercept_dr | g->intercept_dr;
517         c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
518         c->intercept = h->intercept | g->intercept;
519 }
520
521 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
522 {
523         if (is_guest_mode(&svm->vcpu))
524                 return svm->nested.hsave;
525         else
526                 return svm->vmcb;
527 }
528
529 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
530 {
531         struct vmcb *vmcb = get_host_vmcb(svm);
532
533         vmcb->control.intercept_cr |= (1U << bit);
534
535         recalc_intercepts(svm);
536 }
537
538 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
539 {
540         struct vmcb *vmcb = get_host_vmcb(svm);
541
542         vmcb->control.intercept_cr &= ~(1U << bit);
543
544         recalc_intercepts(svm);
545 }
546
547 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
548 {
549         struct vmcb *vmcb = get_host_vmcb(svm);
550
551         return vmcb->control.intercept_cr & (1U << bit);
552 }
553
554 static inline void set_dr_intercepts(struct vcpu_svm *svm)
555 {
556         struct vmcb *vmcb = get_host_vmcb(svm);
557
558         vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
559                 | (1 << INTERCEPT_DR1_READ)
560                 | (1 << INTERCEPT_DR2_READ)
561                 | (1 << INTERCEPT_DR3_READ)
562                 | (1 << INTERCEPT_DR4_READ)
563                 | (1 << INTERCEPT_DR5_READ)
564                 | (1 << INTERCEPT_DR6_READ)
565                 | (1 << INTERCEPT_DR7_READ)
566                 | (1 << INTERCEPT_DR0_WRITE)
567                 | (1 << INTERCEPT_DR1_WRITE)
568                 | (1 << INTERCEPT_DR2_WRITE)
569                 | (1 << INTERCEPT_DR3_WRITE)
570                 | (1 << INTERCEPT_DR4_WRITE)
571                 | (1 << INTERCEPT_DR5_WRITE)
572                 | (1 << INTERCEPT_DR6_WRITE)
573                 | (1 << INTERCEPT_DR7_WRITE);
574
575         recalc_intercepts(svm);
576 }
577
578 static inline void clr_dr_intercepts(struct vcpu_svm *svm)
579 {
580         struct vmcb *vmcb = get_host_vmcb(svm);
581
582         vmcb->control.intercept_dr = 0;
583
584         recalc_intercepts(svm);
585 }
586
587 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
588 {
589         struct vmcb *vmcb = get_host_vmcb(svm);
590
591         vmcb->control.intercept_exceptions |= (1U << bit);
592
593         recalc_intercepts(svm);
594 }
595
596 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
597 {
598         struct vmcb *vmcb = get_host_vmcb(svm);
599
600         vmcb->control.intercept_exceptions &= ~(1U << bit);
601
602         recalc_intercepts(svm);
603 }
604
605 static inline void set_intercept(struct vcpu_svm *svm, int bit)
606 {
607         struct vmcb *vmcb = get_host_vmcb(svm);
608
609         vmcb->control.intercept |= (1ULL << bit);
610
611         recalc_intercepts(svm);
612 }
613
614 static inline void clr_intercept(struct vcpu_svm *svm, int bit)
615 {
616         struct vmcb *vmcb = get_host_vmcb(svm);
617
618         vmcb->control.intercept &= ~(1ULL << bit);
619
620         recalc_intercepts(svm);
621 }
622
623 static inline bool vgif_enabled(struct vcpu_svm *svm)
624 {
625         return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
626 }
627
628 static inline void enable_gif(struct vcpu_svm *svm)
629 {
630         if (vgif_enabled(svm))
631                 svm->vmcb->control.int_ctl |= V_GIF_MASK;
632         else
633                 svm->vcpu.arch.hflags |= HF_GIF_MASK;
634 }
635
636 static inline void disable_gif(struct vcpu_svm *svm)
637 {
638         if (vgif_enabled(svm))
639                 svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
640         else
641                 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
642 }
643
644 static inline bool gif_set(struct vcpu_svm *svm)
645 {
646         if (vgif_enabled(svm))
647                 return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
648         else
649                 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
650 }
651
652 static unsigned long iopm_base;
653
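/*
 * Layout of an LDT/TSS system segment descriptor in the host GDT; a
 * pointer into the GDT is kept per CPU as svm_cpu_data::tss_desc.
 */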
654 struct kvm_ldttss_desc {
655         u16 limit0;
656         u16 base0;
657         unsigned base1:8, type:5, dpl:2, p:1;
658         unsigned limit1:4, zero0:3, g:1, base2:8;
659         u32 base3;
660         u32 zero1;
661 } __attribute__((packed));
662
663 struct svm_cpu_data {
664         int cpu;
665
666         u64 asid_generation;
667         u32 max_asid;
668         u32 next_asid;
669         u32 min_asid;
670         struct kvm_ldttss_desc *tss_desc;
671
672         struct page *save_area;
673         struct vmcb *current_vmcb;
674
675         /* index = sev_asid, value = vmcb pointer */
676         struct vmcb **sev_vmcbs;
677 };
678
679 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
680
681 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
682
683 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
684 #define MSRS_RANGE_SIZE 2048
685 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
686
687 static u32 svm_msrpm_offset(u32 msr)
688 {
689         u32 offset;
690         int i;
691
692         for (i = 0; i < NUM_MSR_MAPS; i++) {
693                 if (msr < msrpm_ranges[i] ||
694                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
695                         continue;
696
697                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
698                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
699
700                 /* Now we have the u8 offset - but need the u32 offset */
701                 return offset / 4;
702         }
703
704         /* MSR not in any range */
705         return MSR_INVALID;
706 }
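/*
 * Each MSR permission map range covers 8192 MSRs at two bits per MSR (one
 * read-intercept bit, one write-intercept bit), i.e. 2048 bytes.  Worked
 * example: MSR_STAR (0xc0000081) lies in the second range, so its byte
 * offset is (0x81 / 4) + 2048 = 2080 and the u32 offset returned above is
 * 2080 / 4 = 520.
 */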
707
708 #define MAX_INST_SIZE 15
709
710 static inline void clgi(void)
711 {
712         asm volatile (__ex("clgi"));
713 }
714
715 static inline void stgi(void)
716 {
717         asm volatile (__ex("stgi"));
718 }
719
720 static inline void invlpga(unsigned long addr, u32 asid)
721 {
722         asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
723 }
724
725 static int get_npt_level(struct kvm_vcpu *vcpu)
726 {
727 #ifdef CONFIG_X86_64
728         return PT64_ROOT_4LEVEL;
729 #else
730         return PT32E_ROOT_LEVEL;
731 #endif
732 }
733
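/*
 * vcpu->arch.efer keeps the guest-visible EFER; the copy written to the
 * VMCB always has EFER.SVME set (VMRUN requires it) and, when shadow
 * paging is in use and long mode is not active (EFER.LMA clear), drops
 * EFER.LME.
 */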
734 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
735 {
736         vcpu->arch.efer = efer;
737         if (!npt_enabled && !(efer & EFER_LMA))
738                 efer &= ~EFER_LME;
739
740         to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
741         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
742 }
743
744 static int is_external_interrupt(u32 info)
745 {
746         info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
747         return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
748 }
749
750 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
751 {
752         struct vcpu_svm *svm = to_svm(vcpu);
753         u32 ret = 0;
754
755         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
756                 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
757         return ret;
758 }
759
760 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
761 {
762         struct vcpu_svm *svm = to_svm(vcpu);
763
764         if (mask == 0)
765                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
766         else
767                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
768
769 }
770
771 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
772 {
773         struct vcpu_svm *svm = to_svm(vcpu);
774
775         if (nrips && svm->vmcb->control.next_rip != 0) {
776                 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
777                 svm->next_rip = svm->vmcb->control.next_rip;
778         }
779
780         if (!svm->next_rip)
781                 return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP);
782
783         if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
784                 printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
785                        __func__, kvm_rip_read(vcpu), svm->next_rip);
786
787         kvm_rip_write(vcpu, svm->next_rip);
788         svm_set_interrupt_shadow(vcpu, 0);
789
790         return EMULATE_DONE;
791 }
792
793 static void svm_queue_exception(struct kvm_vcpu *vcpu)
794 {
795         struct vcpu_svm *svm = to_svm(vcpu);
796         unsigned nr = vcpu->arch.exception.nr;
797         bool has_error_code = vcpu->arch.exception.has_error_code;
798         bool reinject = vcpu->arch.exception.injected;
799         u32 error_code = vcpu->arch.exception.error_code;
800
801         /*
802          * If we are within a nested VM we'd better #VMEXIT and let the guest
803          * handle the exception
804          */
805         if (!reinject &&
806             nested_svm_check_exception(svm, nr, has_error_code, error_code))
807                 return;
808
809         kvm_deliver_exception_payload(&svm->vcpu);
810
811         if (nr == BP_VECTOR && !nrips) {
812                 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
813
814                 /*
815                  * For guest debugging where we have to reinject #BP if some
816                  * INT3 is guest-owned:
817                  * Emulate nRIP by moving RIP forward. Will fail if injection
818                  * raises a fault that is not intercepted. Still better than
819                  * failing in all cases.
820                  */
821                 (void)skip_emulated_instruction(&svm->vcpu);
822                 rip = kvm_rip_read(&svm->vcpu);
823                 svm->int3_rip = rip + svm->vmcb->save.cs.base;
824                 svm->int3_injected = rip - old_rip;
825         }
826
827         svm->vmcb->control.event_inj = nr
828                 | SVM_EVTINJ_VALID
829                 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
830                 | SVM_EVTINJ_TYPE_EXEPT;
831         svm->vmcb->control.event_inj_err = error_code;
832 }
833
834 static void svm_init_erratum_383(void)
835 {
836         u32 low, high;
837         int err;
838         u64 val;
839
840         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
841                 return;
842
843         /* Use _safe variants to not break nested virtualization */
844         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
845         if (err)
846                 return;
847
848         val |= (1ULL << 47);
849
850         low  = lower_32_bits(val);
851         high = upper_32_bits(val);
852
853         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
854
855         erratum_383_found = true;
856 }
857
858 static void svm_init_osvw(struct kvm_vcpu *vcpu)
859 {
860         /*
861          * Guests should see errata 400 and 415 as fixed (assuming that
862          * HLT and IO instructions are intercepted).
863          */
864         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
865         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
866
867         /*
868          * By increasing VCPU's osvw.length to 3 we are telling the guest that
869          * all osvw.status bits inside that length, including bit 0 (which is
870          * reserved for erratum 298), are valid. However, if host processor's
871          * osvw_len is 0 then osvw_status[0] carries no information. We need to
872          * be conservative here and therefore we tell the guest that erratum 298
873          * is present (because we really don't know).
874          */
875         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
876                 vcpu->arch.osvw.status |= 1;
877 }
878
879 static int has_svm(void)
880 {
881         const char *msg;
882
883         if (!cpu_has_svm(&msg)) {
884                 printk(KERN_INFO "has_svm: %s\n", msg);
885                 return 0;
886         }
887
888         return 1;
889 }
890
891 static void svm_hardware_disable(void)
892 {
893         /* Make sure we clean up behind us */
894         if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
895                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
896
897         cpu_svm_disable();
898
899         amd_pmu_disable_virt();
900 }
901
902 static int svm_hardware_enable(void)
903 {
904
905         struct svm_cpu_data *sd;
906         uint64_t efer;
907         struct desc_struct *gdt;
908         int me = raw_smp_processor_id();
909
910         rdmsrl(MSR_EFER, efer);
911         if (efer & EFER_SVME)
912                 return -EBUSY;
913
914         if (!has_svm()) {
915                 pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
916                 return -EINVAL;
917         }
918         sd = per_cpu(svm_data, me);
919         if (!sd) {
920                 pr_err("%s: svm_data is NULL on %d\n", __func__, me);
921                 return -EINVAL;
922         }
923
924         sd->asid_generation = 1;
925         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
926         sd->next_asid = sd->max_asid + 1;
927         sd->min_asid = max_sev_asid + 1;
928
929         gdt = get_current_gdt_rw();
930         sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
931
932         wrmsrl(MSR_EFER, efer | EFER_SVME);
933
934         wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
935
936         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
937                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
938                 __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
939         }
940
941
942         /*
943          * Get OSVW bits.
944          *
945          * Note that it is possible to have a system with mixed processor
946          * revisions and therefore different OSVW bits. If bits are not the same
947          * on different processors then choose the worst case (i.e. if erratum
948          * is present on one processor and not on another then assume that the
949          * erratum is present everywhere).
950          */
951         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
952                 uint64_t len, status = 0;
953                 int err;
954
955                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
956                 if (!err)
957                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
958                                                       &err);
959
960                 if (err)
961                         osvw_status = osvw_len = 0;
962                 else {
963                         if (len < osvw_len)
964                                 osvw_len = len;
965                         osvw_status |= status;
966                         osvw_status &= (1ULL << osvw_len) - 1;
967                 }
968         } else
969                 osvw_status = osvw_len = 0;
970
971         svm_init_erratum_383();
972
973         amd_pmu_enable_virt();
974
975         return 0;
976 }
977
978 static void svm_cpu_uninit(int cpu)
979 {
980         struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
981
982         if (!sd)
983                 return;
984
985         per_cpu(svm_data, raw_smp_processor_id()) = NULL;
986         kfree(sd->sev_vmcbs);
987         __free_page(sd->save_area);
988         kfree(sd);
989 }
990
991 static int svm_cpu_init(int cpu)
992 {
993         struct svm_cpu_data *sd;
994         int r;
995
996         sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
997         if (!sd)
998                 return -ENOMEM;
999         sd->cpu = cpu;
1000         r = -ENOMEM;
1001         sd->save_area = alloc_page(GFP_KERNEL);
1002         if (!sd->save_area)
1003                 goto err_1;
1004
1005         if (svm_sev_enabled()) {
1006                 r = -ENOMEM;
1007                 sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
1008                                               sizeof(void *),
1009                                               GFP_KERNEL);
1010                 if (!sd->sev_vmcbs)
1011                         goto err_1;
1012         }
1013
1014         per_cpu(svm_data, cpu) = sd;
1015
1016         return 0;
1017
1018 err_1:
1019         kfree(sd);
1020         return r;
1021
1022 }
1023
1024 static bool valid_msr_intercept(u32 index)
1025 {
1026         int i;
1027
1028         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
1029                 if (direct_access_msrs[i].index == index)
1030                         return true;
1031
1032         return false;
1033 }
1034
1035 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
1036 {
1037         u8 bit_write;
1038         unsigned long tmp;
1039         u32 offset;
1040         u32 *msrpm;
1041
1042         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
1043                                       to_svm(vcpu)->msrpm;
1044
1045         offset    = svm_msrpm_offset(msr);
1046         bit_write = 2 * (msr & 0x0f) + 1;
1047         tmp       = msrpm[offset];
1048
1049         BUG_ON(offset == MSR_INVALID);
1050
1051         return !!test_bit(bit_write,  &tmp);
1052 }
1053
1054 static void set_msr_interception(u32 *msrpm, unsigned msr,
1055                                  int read, int write)
1056 {
1057         u8 bit_read, bit_write;
1058         unsigned long tmp;
1059         u32 offset;
1060
1061         /*
1062          * If this warning triggers, extend the direct_access_msrs list at the
1063          * beginning of the file.
1064          */
1065         WARN_ON(!valid_msr_intercept(msr));
1066
1067         offset    = svm_msrpm_offset(msr);
1068         bit_read  = 2 * (msr & 0x0f);
1069         bit_write = 2 * (msr & 0x0f) + 1;
1070         tmp       = msrpm[offset];
1071
1072         BUG_ON(offset == MSR_INVALID);
1073
1074         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
1075         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
1076
1077         msrpm[offset] = tmp;
1078 }
1079
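/*
 * A freshly initialized MSR permission map intercepts every MSR access
 * (all bits set); intercepts are then cleared only for the MSRs marked
 * "always" in direct_access_msrs, giving the guest direct access to them.
 */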
1080 static void svm_vcpu_init_msrpm(u32 *msrpm)
1081 {
1082         int i;
1083
1084         memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
1085
1086         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
1087                 if (!direct_access_msrs[i].always)
1088                         continue;
1089
1090                 set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
1091         }
1092 }
1093
1094 static void add_msr_offset(u32 offset)
1095 {
1096         int i;
1097
1098         for (i = 0; i < MSRPM_OFFSETS; ++i) {
1099
1100                 /* Offset already in list? */
1101                 if (msrpm_offsets[i] == offset)
1102                         return;
1103
1104                 /* Slot used by another offset? */
1105                 if (msrpm_offsets[i] != MSR_INVALID)
1106                         continue;
1107
1108                 /* Add offset to list */
1109                 msrpm_offsets[i] = offset;
1110
1111                 return;
1112         }
1113
1114         /*
1115          * If this BUG triggers, the msrpm_offsets table has overflowed. Just
1116          * increase MSRPM_OFFSETS in this case.
1117          */
1118         BUG();
1119 }
1120
1121 static void init_msrpm_offsets(void)
1122 {
1123         int i;
1124
1125         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
1126
1127         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
1128                 u32 offset;
1129
1130                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
1131                 BUG_ON(offset == MSR_INVALID);
1132
1133                 add_msr_offset(offset);
1134         }
1135 }
1136
1137 static void svm_enable_lbrv(struct vcpu_svm *svm)
1138 {
1139         u32 *msrpm = svm->msrpm;
1140
1141         svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1142         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
1143         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
1144         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
1145         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
1146 }
1147
1148 static void svm_disable_lbrv(struct vcpu_svm *svm)
1149 {
1150         u32 *msrpm = svm->msrpm;
1151
1152         svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1153         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
1154         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
1155         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
1156         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
1157 }
1158
1159 static void disable_nmi_singlestep(struct vcpu_svm *svm)
1160 {
1161         svm->nmi_singlestep = false;
1162
1163         if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1164                 /* Clear our flags if they were not set by the guest */
1165                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1166                         svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1167                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1168                         svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1169         }
1170 }
1171
1172 /* Note:
1173  * This hash table is used to map VM_ID to a struct kvm_svm,
1174  * when handling AMD IOMMU GALOG notification to schedule in
1175  * a particular vCPU.
1176  */
1177 #define SVM_VM_DATA_HASH_BITS   8
1178 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
1179 static u32 next_vm_id = 0;
1180 static bool next_vm_id_wrapped = 0;
1181 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
1182
1183 /* Note:
1184  * This function is called from the IOMMU driver to notify
1185  * SVM to schedule in a particular vCPU of a particular VM.
1186  */
1187 static int avic_ga_log_notifier(u32 ga_tag)
1188 {
1189         unsigned long flags;
1190         struct kvm_svm *kvm_svm;
1191         struct kvm_vcpu *vcpu = NULL;
1192         u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
1193         u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
1194
1195         pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
1196
1197         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1198         hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
1199                 if (kvm_svm->avic_vm_id != vm_id)
1200                         continue;
1201                 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
1202                 break;
1203         }
1204         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1205
1206         /* Note:
1207          * At this point, the IOMMU should have already set the pending
1208          * bit in the vAPIC backing page. So, we just need to schedule
1209          * in the vcpu.
1210          */
1211         if (vcpu)
1212                 kvm_vcpu_wake_up(vcpu);
1213
1214         return 0;
1215 }
1216
1217 static __init int sev_hardware_setup(void)
1218 {
1219         struct sev_user_data_status *status;
1220         int rc;
1221
1222         /* Maximum number of encrypted guests supported simultaneously */
1223         max_sev_asid = cpuid_ecx(0x8000001F);
1224
1225         if (!max_sev_asid)
1226                 return 1;
1227
1228         /* Minimum ASID value that should be used for SEV guest */
1229         min_sev_asid = cpuid_edx(0x8000001F);
1230
1231         /* Initialize SEV ASID bitmap */
1232         sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1233         if (!sev_asid_bitmap)
1234                 return 1;
1235
1236         status = kmalloc(sizeof(*status), GFP_KERNEL);
1237         if (!status)
1238                 return 1;
1239
1240         /*
1241          * Check SEV platform status.
1242          *
1243          * PLATFORM_STATUS can be called in any state; if we fail to query the
1244          * platform status then either the PSP firmware does not support the
1245          * SEV feature or the SEV firmware is dead.
1246          */
1247         rc = sev_platform_status(status, NULL);
1248         if (rc)
1249                 goto err;
1250
1251         pr_info("SEV supported\n");
1252
1253 err:
1254         kfree(status);
1255         return rc;
1256 }
1257
1258 static void grow_ple_window(struct kvm_vcpu *vcpu)
1259 {
1260         struct vcpu_svm *svm = to_svm(vcpu);
1261         struct vmcb_control_area *control = &svm->vmcb->control;
1262         int old = control->pause_filter_count;
1263
1264         control->pause_filter_count = __grow_ple_window(old,
1265                                                         pause_filter_count,
1266                                                         pause_filter_count_grow,
1267                                                         pause_filter_count_max);
1268
1269         if (control->pause_filter_count != old) {
1270                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1271                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1272                                             control->pause_filter_count, old);
1273         }
1274 }
1275
1276 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1277 {
1278         struct vcpu_svm *svm = to_svm(vcpu);
1279         struct vmcb_control_area *control = &svm->vmcb->control;
1280         int old = control->pause_filter_count;
1281
1282         control->pause_filter_count =
1283                                 __shrink_ple_window(old,
1284                                                     pause_filter_count,
1285                                                     pause_filter_count_shrink,
1286                                                     pause_filter_count);
1287         if (control->pause_filter_count != old) {
1288                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1289                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1290                                             control->pause_filter_count, old);
1291         }
1292 }
1293
1294 static __init int svm_hardware_setup(void)
1295 {
1296         int cpu;
1297         struct page *iopm_pages;
1298         void *iopm_va;
1299         int r;
1300
1301         iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
1302
1303         if (!iopm_pages)
1304                 return -ENOMEM;
1305
1306         iopm_va = page_address(iopm_pages);
1307         memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
1308         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
1309
1310         init_msrpm_offsets();
1311
1312         if (boot_cpu_has(X86_FEATURE_NX))
1313                 kvm_enable_efer_bits(EFER_NX);
1314
1315         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
1316                 kvm_enable_efer_bits(EFER_FFXSR);
1317
1318         if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1319                 kvm_has_tsc_control = true;
1320                 kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
1321                 kvm_tsc_scaling_ratio_frac_bits = 32;
1322         }
1323
1324         /* Check for pause filtering support */
1325         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
1326                 pause_filter_count = 0;
1327                 pause_filter_thresh = 0;
1328         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
1329                 pause_filter_thresh = 0;
1330         }
1331
1332         if (nested) {
1333                 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
1334                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
1335         }
1336
1337         if (sev) {
1338                 if (boot_cpu_has(X86_FEATURE_SEV) &&
1339                     IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
1340                         r = sev_hardware_setup();
1341                         if (r)
1342                                 sev = false;
1343                 } else {
1344                         sev = false;
1345                 }
1346         }
1347
1348         for_each_possible_cpu(cpu) {
1349                 r = svm_cpu_init(cpu);
1350                 if (r)
1351                         goto err;
1352         }
1353
1354         if (!boot_cpu_has(X86_FEATURE_NPT))
1355                 npt_enabled = false;
1356
1357         if (npt_enabled && !npt) {
1358                 printk(KERN_INFO "kvm: Nested Paging disabled\n");
1359                 npt_enabled = false;
1360         }
1361
1362         if (npt_enabled) {
1363                 printk(KERN_INFO "kvm: Nested Paging enabled\n");
1364                 kvm_enable_tdp();
1365         } else
1366                 kvm_disable_tdp();
1367
1368         if (nrips) {
1369                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
1370                         nrips = false;
1371         }
1372
1373         if (avic) {
1374                 if (!npt_enabled ||
1375                     !boot_cpu_has(X86_FEATURE_AVIC) ||
1376                     !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
1377                         avic = false;
1378                 } else {
1379                         pr_info("AVIC enabled\n");
1380
1381                         amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
1382                 }
1383         }
1384
1385         if (vls) {
1386                 if (!npt_enabled ||
1387                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
1388                     !IS_ENABLED(CONFIG_X86_64)) {
1389                         vls = false;
1390                 } else {
1391                         pr_info("Virtual VMLOAD VMSAVE supported\n");
1392                 }
1393         }
1394
1395         if (vgif) {
1396                 if (!boot_cpu_has(X86_FEATURE_VGIF))
1397                         vgif = false;
1398                 else
1399                         pr_info("Virtual GIF supported\n");
1400         }
1401
1402         return 0;
1403
1404 err:
1405         __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
1406         iopm_base = 0;
1407         return r;
1408 }
1409
1410 static __exit void svm_hardware_unsetup(void)
1411 {
1412         int cpu;
1413
1414         if (svm_sev_enabled())
1415                 bitmap_free(sev_asid_bitmap);
1416
1417         for_each_possible_cpu(cpu)
1418                 svm_cpu_uninit(cpu);
1419
1420         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
1421         iopm_base = 0;
1422 }
1423
1424 static void init_seg(struct vmcb_seg *seg)
1425 {
1426         seg->selector = 0;
1427         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1428                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1429         seg->limit = 0xffff;
1430         seg->base = 0;
1431 }
1432
1433 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1434 {
1435         seg->selector = 0;
1436         seg->attrib = SVM_SELECTOR_P_MASK | type;
1437         seg->limit = 0xffff;
1438         seg->base = 0;
1439 }
1440
1441 static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1442 {
1443         struct vcpu_svm *svm = to_svm(vcpu);
1444
1445         if (is_guest_mode(vcpu))
1446                 return svm->nested.hsave->control.tsc_offset;
1447
1448         return vcpu->arch.tsc_offset;
1449 }
1450
1451 static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1452 {
1453         struct vcpu_svm *svm = to_svm(vcpu);
1454         u64 g_tsc_offset = 0;
1455
1456         if (is_guest_mode(vcpu)) {
1457                 /* Write L1's TSC offset.  */
1458                 g_tsc_offset = svm->vmcb->control.tsc_offset -
1459                                svm->nested.hsave->control.tsc_offset;
1460                 svm->nested.hsave->control.tsc_offset = offset;
1461         }
1462
1463         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1464                                    svm->vmcb->control.tsc_offset - g_tsc_offset,
1465                                    offset);
1466
1467         svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1468
1469         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1470         return svm->vmcb->control.tsc_offset;
1471 }
1472
1473 static void avic_init_vmcb(struct vcpu_svm *svm)
1474 {
1475         struct vmcb *vmcb = svm->vmcb;
1476         struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
1477         phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
1478         phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
1479         phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
1480
1481         vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
1482         vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
1483         vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
1484         vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
1485         vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
1486 }
1487
1488 static void init_vmcb(struct vcpu_svm *svm)
1489 {
1490         struct vmcb_control_area *control = &svm->vmcb->control;
1491         struct vmcb_save_area *save = &svm->vmcb->save;
1492
1493         svm->vcpu.arch.hflags = 0;
1494
1495         set_cr_intercept(svm, INTERCEPT_CR0_READ);
1496         set_cr_intercept(svm, INTERCEPT_CR3_READ);
1497         set_cr_intercept(svm, INTERCEPT_CR4_READ);
1498         set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1499         set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1500         set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1501         if (!kvm_vcpu_apicv_active(&svm->vcpu))
1502                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
1503
1504         set_dr_intercepts(svm);
1505
1506         set_exception_intercept(svm, PF_VECTOR);
1507         set_exception_intercept(svm, UD_VECTOR);
1508         set_exception_intercept(svm, MC_VECTOR);
1509         set_exception_intercept(svm, AC_VECTOR);
1510         set_exception_intercept(svm, DB_VECTOR);
1511         /*
1512          * Guest access to VMware backdoor ports could legitimately
1513          * trigger #GP because of TSS I/O permission bitmap.
1514          * We intercept those #GP and allow access to them anyway
1515          * as VMware does.
1516          */
1517         if (enable_vmware_backdoor)
1518                 set_exception_intercept(svm, GP_VECTOR);
1519
1520         set_intercept(svm, INTERCEPT_INTR);
1521         set_intercept(svm, INTERCEPT_NMI);
1522         set_intercept(svm, INTERCEPT_SMI);
1523         set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1524         set_intercept(svm, INTERCEPT_RDPMC);
1525         set_intercept(svm, INTERCEPT_CPUID);
1526         set_intercept(svm, INTERCEPT_INVD);
1527         set_intercept(svm, INTERCEPT_INVLPG);
1528         set_intercept(svm, INTERCEPT_INVLPGA);
1529         set_intercept(svm, INTERCEPT_IOIO_PROT);
1530         set_intercept(svm, INTERCEPT_MSR_PROT);
1531         set_intercept(svm, INTERCEPT_TASK_SWITCH);
1532         set_intercept(svm, INTERCEPT_SHUTDOWN);
1533         set_intercept(svm, INTERCEPT_VMRUN);
1534         set_intercept(svm, INTERCEPT_VMMCALL);
1535         set_intercept(svm, INTERCEPT_VMLOAD);
1536         set_intercept(svm, INTERCEPT_VMSAVE);
1537         set_intercept(svm, INTERCEPT_STGI);
1538         set_intercept(svm, INTERCEPT_CLGI);
1539         set_intercept(svm, INTERCEPT_SKINIT);
1540         set_intercept(svm, INTERCEPT_WBINVD);
1541         set_intercept(svm, INTERCEPT_XSETBV);
1542         set_intercept(svm, INTERCEPT_RSM);
1543
1544         if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
1545                 set_intercept(svm, INTERCEPT_MONITOR);
1546                 set_intercept(svm, INTERCEPT_MWAIT);
1547         }
1548
1549         if (!kvm_hlt_in_guest(svm->vcpu.kvm))
1550                 set_intercept(svm, INTERCEPT_HLT);
1551
1552         control->iopm_base_pa = __sme_set(iopm_base);
1553         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1554         control->int_ctl = V_INTR_MASKING_MASK;
1555
1556         init_seg(&save->es);
1557         init_seg(&save->ss);
1558         init_seg(&save->ds);
1559         init_seg(&save->fs);
1560         init_seg(&save->gs);
1561
1562         save->cs.selector = 0xf000;
1563         save->cs.base = 0xffff0000;
1564         /* Executable/Readable Code Segment */
1565         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1566                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1567         save->cs.limit = 0xffff;
1568
1569         save->gdtr.limit = 0xffff;
1570         save->idtr.limit = 0xffff;
1571
1572         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1573         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1574
1575         svm_set_efer(&svm->vcpu, 0);
1576         save->dr6 = 0xffff0ff0;
1577         kvm_set_rflags(&svm->vcpu, 2);
1578         save->rip = 0x0000fff0;
1579         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1580
1581         /*
1582          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1583          * It also updates the guest-visible cr0 value.
1584          */
1585         svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1586         kvm_mmu_reset_context(&svm->vcpu);
1587
1588         save->cr4 = X86_CR4_PAE;
1589         /* rdx = ?? */
1590
1591         if (npt_enabled) {
1592                 /* Setup VMCB for Nested Paging */
1593                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1594                 clr_intercept(svm, INTERCEPT_INVLPG);
1595                 clr_exception_intercept(svm, PF_VECTOR);
1596                 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1597                 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1598                 save->g_pat = svm->vcpu.arch.pat;
1599                 save->cr3 = 0;
1600                 save->cr4 = 0;
1601         }
1602         svm->asid_generation = 0;
1603
1604         svm->nested.vmcb = 0;
1605         svm->vcpu.arch.hflags = 0;
1606
1607         if (pause_filter_count) {
1608                 control->pause_filter_count = pause_filter_count;
1609                 if (pause_filter_thresh)
1610                         control->pause_filter_thresh = pause_filter_thresh;
1611                 set_intercept(svm, INTERCEPT_PAUSE);
1612         } else {
1613                 clr_intercept(svm, INTERCEPT_PAUSE);
1614         }
1615
1616         if (kvm_vcpu_apicv_active(&svm->vcpu))
1617                 avic_init_vmcb(svm);
1618
1619         /*
1620          * If hardware supports Virtual VMLOAD VMSAVE then enable it
1621          * in VMCB and clear intercepts to avoid #VMEXIT.
1622          */
1623         if (vls) {
1624                 clr_intercept(svm, INTERCEPT_VMLOAD);
1625                 clr_intercept(svm, INTERCEPT_VMSAVE);
1626                 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1627         }
1628
1629         if (vgif) {
1630                 clr_intercept(svm, INTERCEPT_STGI);
1631                 clr_intercept(svm, INTERCEPT_CLGI);
1632                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1633         }
1634
1635         if (sev_guest(svm->vcpu.kvm)) {
1636                 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1637                 clr_exception_intercept(svm, UD_VECTOR);
1638         }
1639
1640         mark_all_dirty(svm->vmcb);
1641
1642         enable_gif(svm);
1643
1644 }
1645
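/*
 * Return a pointer to this vcpu's entry in the per-VM physical APIC ID
 * table, or NULL if the index exceeds what AVIC supports.
 */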
1646 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1647                                        unsigned int index)
1648 {
1649         u64 *avic_physical_id_table;
1650         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
1651
1652         if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1653                 return NULL;
1654
1655         avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
1656
1657         return &avic_physical_id_table[index];
1658 }
1659
1660 /**
1661  * Note:
1662  * AVIC hardware walks the nested page table to check permissions,
1663  * but does not use the SPA address specified in the leaf page
1664  * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1665  * field of the VMCB. Therefore, we set up the
1666  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1667  */
1668 static int avic_init_access_page(struct kvm_vcpu *vcpu)
1669 {
1670         struct kvm *kvm = vcpu->kvm;
1671         int ret = 0;
1672
1673         mutex_lock(&kvm->slots_lock);
1674         if (kvm->arch.apic_access_page_done)
1675                 goto out;
1676
1677         ret = __x86_set_memory_region(kvm,
1678                                       APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1679                                       APIC_DEFAULT_PHYS_BASE,
1680                                       PAGE_SIZE);
1681         if (ret)
1682                 goto out;
1683
1684         kvm->arch.apic_access_page_done = true;
1685 out:
1686         mutex_unlock(&kvm->slots_lock);
1687         return ret;
1688 }
1689
1690 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1691 {
1692         int ret;
1693         u64 *entry, new_entry;
1694         int id = vcpu->vcpu_id;
1695         struct vcpu_svm *svm = to_svm(vcpu);
1696
1697         ret = avic_init_access_page(vcpu);
1698         if (ret)
1699                 return ret;
1700
1701         if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1702                 return -EINVAL;
1703
1704         if (!svm->vcpu.arch.apic->regs)
1705                 return -EINVAL;
1706
1707         svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1708
1709         /* Set the AVIC backing page address in the physical APIC ID table */
1710         entry = avic_get_physical_id_entry(vcpu, id);
1711         if (!entry)
1712                 return -EINVAL;
1713
1714         new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1715                               AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1716                               AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
1717         WRITE_ONCE(*entry, new_entry);
1718
1719         svm->avic_physical_id_cache = entry;
1720
1721         return 0;
1722 }
1723
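/*
 * Return an SEV ASID to the allocation bitmap and clear every per-CPU
 * cached VMCB pointer recorded for it.
 */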
1724 static void __sev_asid_free(int asid)
1725 {
1726         struct svm_cpu_data *sd;
1727         int cpu, pos;
1728
1729         pos = asid - 1;
1730         clear_bit(pos, sev_asid_bitmap);
1731
1732         for_each_possible_cpu(cpu) {
1733                 sd = per_cpu(svm_data, cpu);
1734                 sd->sev_vmcbs[pos] = NULL;
1735         }
1736 }
1737
1738 static void sev_asid_free(struct kvm *kvm)
1739 {
1740         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1741
1742         __sev_asid_free(sev->asid);
1743 }
1744
1745 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1746 {
1747         struct sev_data_decommission *decommission;
1748         struct sev_data_deactivate *data;
1749
1750         if (!handle)
1751                 return;
1752
1753         data = kzalloc(sizeof(*data), GFP_KERNEL);
1754         if (!data)
1755                 return;
1756
1757         /* deactivate handle */
1758         data->handle = handle;
1759         sev_guest_deactivate(data, NULL);
1760
1761         wbinvd_on_all_cpus();
1762         sev_guest_df_flush(NULL);
1763         kfree(data);
1764
1765         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1766         if (!decommission)
1767                 return;
1768
1769         /* decommission handle */
1770         decommission->handle = handle;
1771         sev_guest_decommission(decommission, NULL);
1772
1773         kfree(decommission);
1774 }
1775
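/*
 * Pin a userspace address range for SEV.  The total number of pinned
 * pages is checked against RLIMIT_MEMLOCK (unless the caller has
 * CAP_IPC_LOCK); on success the array of pinned pages is returned and
 * *n is set to the page count, otherwise NULL is returned.
 */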
1776 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1777                                     unsigned long ulen, unsigned long *n,
1778                                     int write)
1779 {
1780         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1781         unsigned long npages, npinned, size;
1782         unsigned long locked, lock_limit;
1783         struct page **pages;
1784         unsigned long first, last;
1785
1786         if (ulen == 0 || uaddr + ulen < uaddr)
1787                 return NULL;
1788
1789         /* Calculate number of pages. */
1790         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1791         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1792         npages = (last - first + 1);
1793
1794         locked = sev->pages_locked + npages;
1795         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1796         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1797                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1798                 return NULL;
1799         }
1800
1801         /* Avoid using vmalloc for smaller buffers. */
1802         size = npages * sizeof(struct page *);
1803         if (size > PAGE_SIZE)
1804                 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1805                                   PAGE_KERNEL);
1806         else
1807                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
1808
1809         if (!pages)
1810                 return NULL;
1811
1812         /* Pin the user virtual address. */
1813         npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
1814         if (npinned != npages) {
1815                 pr_err("SEV: Failure locking %lu pages.\n", npages);
1816                 goto err;
1817         }
1818
1819         *n = npages;
1820         sev->pages_locked = locked;
1821
1822         return pages;
1823
1824 err:
1825         if (npinned > 0)
1826                 release_pages(pages, npinned);
1827
1828         kvfree(pages);
1829         return NULL;
1830 }
1831
1832 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1833                              unsigned long npages)
1834 {
1835         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1836
1837         release_pages(pages, npages);
1838         kvfree(pages);
1839         sev->pages_locked -= npages;
1840 }
1841
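/*
 * Flush the CPU data cache for every pinned page so guest data reaches
 * memory with the correct C-bit.
 */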
1842 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1843 {
1844         uint8_t *page_virtual;
1845         unsigned long i;
1846
1847         if (npages == 0 || pages == NULL)
1848                 return;
1849
1850         for (i = 0; i < npages; i++) {
1851                 page_virtual = kmap_atomic(pages[i]);
1852                 clflush_cache_range(page_virtual, PAGE_SIZE);
1853                 kunmap_atomic(page_virtual);
1854         }
1855 }
1856
1857 static void __unregister_enc_region_locked(struct kvm *kvm,
1858                                            struct enc_region *region)
1859 {
1860         /*
1861          * The guest may change the memory encryption attribute from C=0 -> C=1
1862          * or vice versa for this memory range. Let's make sure caches are
1863          * flushed to ensure that guest data gets written into memory with
1864          * the correct C-bit.
1865          */
1866         sev_clflush_pages(region->pages, region->npages);
1867
1868         sev_unpin_memory(kvm, region->pages, region->npages);
1869         list_del(&region->list);
1870         kfree(region);
1871 }
1872
1873 static struct kvm *svm_vm_alloc(void)
1874 {
1875         struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
1876                                             GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1877                                             PAGE_KERNEL);
1878         return &kvm_svm->kvm;
1879 }
1880
1881 static void svm_vm_free(struct kvm *kvm)
1882 {
1883         vfree(to_kvm_svm(kvm));
1884 }
1885
1886 static void sev_vm_destroy(struct kvm *kvm)
1887 {
1888         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1889         struct list_head *head = &sev->regions_list;
1890         struct list_head *pos, *q;
1891
1892         if (!sev_guest(kvm))
1893                 return;
1894
1895         mutex_lock(&kvm->lock);
1896
1897         /*
1898          * If userspace was terminated before unregistering the memory regions,
1899          * then let's unpin all the registered memory.
1900          */
1901         if (!list_empty(head)) {
1902                 list_for_each_safe(pos, q, head) {
1903                         __unregister_enc_region_locked(kvm,
1904                                 list_entry(pos, struct enc_region, list));
1905                 }
1906         }
1907
1908         mutex_unlock(&kvm->lock);
1909
1910         sev_unbind_asid(kvm, sev->handle);
1911         sev_asid_free(kvm);
1912 }
1913
1914 static void avic_vm_destroy(struct kvm *kvm)
1915 {
1916         unsigned long flags;
1917         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1918
1919         if (!avic)
1920                 return;
1921
1922         if (kvm_svm->avic_logical_id_table_page)
1923                 __free_page(kvm_svm->avic_logical_id_table_page);
1924         if (kvm_svm->avic_physical_id_table_page)
1925                 __free_page(kvm_svm->avic_physical_id_table_page);
1926
1927         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1928         hash_del(&kvm_svm->hnode);
1929         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1930 }
1931
1932 static void svm_vm_destroy(struct kvm *kvm)
1933 {
1934         avic_vm_destroy(kvm);
1935         sev_vm_destroy(kvm);
1936 }
1937
1938 static int avic_vm_init(struct kvm *kvm)
1939 {
1940         unsigned long flags;
1941         int err = -ENOMEM;
1942         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1943         struct kvm_svm *k2;
1944         struct page *p_page;
1945         struct page *l_page;
1946         u32 vm_id;
1947
1948         if (!avic)
1949                 return 0;
1950
1951         /* Allocating physical APIC ID table (4KB) */
1952         p_page = alloc_page(GFP_KERNEL_ACCOUNT);
1953         if (!p_page)
1954                 goto free_avic;
1955
1956         kvm_svm->avic_physical_id_table_page = p_page;
1957         clear_page(page_address(p_page));
1958
1959         /* Allocating logical APIC ID table (4KB) */
1960         l_page = alloc_page(GFP_KERNEL_ACCOUNT);
1961         if (!l_page)
1962                 goto free_avic;
1963
1964         kvm_svm->avic_logical_id_table_page = l_page;
1965         clear_page(page_address(l_page));
1966
1967         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1968  again:
1969         vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1970         if (vm_id == 0) { /* id is 1-based, zero is not okay */
1971                 next_vm_id_wrapped = 1;
1972                 goto again;
1973         }
1974         /* Is it still in use? Only possible if wrapped at least once */
1975         if (next_vm_id_wrapped) {
1976                 hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
1977                         if (k2->avic_vm_id == vm_id)
1978                                 goto again;
1979                 }
1980         }
1981         kvm_svm->avic_vm_id = vm_id;
1982         hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
1983         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1984
1985         return 0;
1986
1987 free_avic:
1988         avic_vm_destroy(kvm);
1989         return err;
1990 }
1991
1992 static inline int
1993 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1994 {
1995         int ret = 0;
1996         unsigned long flags;
1997         struct amd_svm_iommu_ir *ir;
1998         struct vcpu_svm *svm = to_svm(vcpu);
1999
2000         if (!kvm_arch_has_assigned_device(vcpu->kvm))
2001                 return 0;
2002
2003         /*
2004          * Here, we go through the per-vcpu ir_list to update all existing
2005          * interrupt remapping table entry targeting this vcpu.
2006          */
2007         spin_lock_irqsave(&svm->ir_list_lock, flags);
2008
2009         if (list_empty(&svm->ir_list))
2010                 goto out;
2011
2012         list_for_each_entry(ir, &svm->ir_list, node) {
2013                 ret = amd_iommu_update_ga(cpu, r, ir->data);
2014                 if (ret)
2015                         break;
2016         }
2017 out:
2018         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
2019         return ret;
2020 }
2021
2022 static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2023 {
2024         u64 entry;
2025         /* ID = 0xff (broadcast), ID > 0xff (reserved) */
2026         int h_physical_id = kvm_cpu_get_apicid(cpu);
2027         struct vcpu_svm *svm = to_svm(vcpu);
2028
2029         if (!kvm_vcpu_apicv_active(vcpu))
2030                 return;
2031
2032         /*
2033          * Since the host physical APIC ID is 8 bits,
2034          * we can support a host APIC ID of up to 255.
2035          */
2036         if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
2037                 return;
2038
2039         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2040         WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
2041
2042         entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
2043         entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
2044
2045         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2046         if (svm->avic_is_running)
2047                 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2048
2049         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2050         avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
2051                                         svm->avic_is_running);
2052 }
2053
2054 static void avic_vcpu_put(struct kvm_vcpu *vcpu)
2055 {
2056         u64 entry;
2057         struct vcpu_svm *svm = to_svm(vcpu);
2058
2059         if (!kvm_vcpu_apicv_active(vcpu))
2060                 return;
2061
2062         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2063         if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
2064                 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
2065
2066         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2067         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2068 }
2069
2070 /**
2071  * This function is called during VCPU halt/unhalt.
2072  */
2073 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
2074 {
2075         struct vcpu_svm *svm = to_svm(vcpu);
2076
2077         svm->avic_is_running = is_run;
2078         if (is_run)
2079                 avic_vcpu_load(vcpu, vcpu->cpu);
2080         else
2081                 avic_vcpu_put(vcpu);
2082 }
2083
2084 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2085 {
2086         struct vcpu_svm *svm = to_svm(vcpu);
2087         u32 dummy;
2088         u32 eax = 1;
2089
2090         vcpu->arch.microcode_version = 0x01000065;
2091         svm->spec_ctrl = 0;
2092         svm->virt_spec_ctrl = 0;
2093
2094         if (!init_event) {
2095                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
2096                                            MSR_IA32_APICBASE_ENABLE;
2097                 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
2098                         svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
2099         }
2100         init_vmcb(svm);
2101
2102         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
2103         kvm_rdx_write(vcpu, eax);
2104
2105         if (kvm_vcpu_apicv_active(vcpu) && !init_event)
2106                 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
2107 }
2108
2109 static int avic_init_vcpu(struct vcpu_svm *svm)
2110 {
2111         int ret;
2112
2113         if (!kvm_vcpu_apicv_active(&svm->vcpu))
2114                 return 0;
2115
2116         ret = avic_init_backing_page(&svm->vcpu);
2117         if (ret)
2118                 return ret;
2119
2120         INIT_LIST_HEAD(&svm->ir_list);
2121         spin_lock_init(&svm->ir_list_lock);
2122         svm->dfr_reg = APIC_DFR_FLAT;
2123
2124         return ret;
2125 }
2126
2127 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
2128 {
2129         struct vcpu_svm *svm;
2130         struct page *page;
2131         struct page *msrpm_pages;
2132         struct page *hsave_page;
2133         struct page *nested_msrpm_pages;
2134         int err;
2135
2136         BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
2137                 "struct kvm_vcpu must be at offset 0 for arch usercopy region");
2138
2139         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
2140         if (!svm) {
2141                 err = -ENOMEM;
2142                 goto out;
2143         }
2144
2145         svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
2146                                                      GFP_KERNEL_ACCOUNT);
2147         if (!svm->vcpu.arch.user_fpu) {
2148                 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
2149                 err = -ENOMEM;
2150                 goto free_partial_svm;
2151         }
2152
2153         svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
2154                                                      GFP_KERNEL_ACCOUNT);
2155         if (!svm->vcpu.arch.guest_fpu) {
2156                 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
2157                 err = -ENOMEM;
2158                 goto free_user_fpu;
2159         }
2160
2161         err = kvm_vcpu_init(&svm->vcpu, kvm, id);
2162         if (err)
2163                 goto free_svm;
2164
2165         err = -ENOMEM;
2166         page = alloc_page(GFP_KERNEL_ACCOUNT);
2167         if (!page)
2168                 goto uninit;
2169
2170         msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2171         if (!msrpm_pages)
2172                 goto free_page1;
2173
2174         nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2175         if (!nested_msrpm_pages)
2176                 goto free_page2;
2177
2178         hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
2179         if (!hsave_page)
2180                 goto free_page3;
2181
2182         err = avic_init_vcpu(svm);
2183         if (err)
2184                 goto free_page4;
2185
2186         /* We initialize this flag to true to make sure that the is_running
2187          * bit is set the first time the vcpu is loaded.
2188          */
2189         svm->avic_is_running = true;
2190
2191         svm->nested.hsave = page_address(hsave_page);
2192
2193         svm->msrpm = page_address(msrpm_pages);
2194         svm_vcpu_init_msrpm(svm->msrpm);
2195
2196         svm->nested.msrpm = page_address(nested_msrpm_pages);
2197         svm_vcpu_init_msrpm(svm->nested.msrpm);
2198
2199         svm->vmcb = page_address(page);
2200         clear_page(svm->vmcb);
2201         svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
2202         svm->asid_generation = 0;
2203         init_vmcb(svm);
2204
2205         svm_init_osvw(&svm->vcpu);
2206
2207         return &svm->vcpu;
2208
2209 free_page4:
2210         __free_page(hsave_page);
2211 free_page3:
2212         __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
2213 free_page2:
2214         __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
2215 free_page1:
2216         __free_page(page);
2217 uninit:
2218         kvm_vcpu_uninit(&svm->vcpu);
2219 free_svm:
2220         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2221 free_user_fpu:
2222         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2223 free_partial_svm:
2224         kmem_cache_free(kvm_vcpu_cache, svm);
2225 out:
2226         return ERR_PTR(err);
2227 }
2228
2229 static void svm_clear_current_vmcb(struct vmcb *vmcb)
2230 {
2231         int i;
2232
2233         for_each_online_cpu(i)
2234                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2235 }
2236
2237 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2238 {
2239         struct vcpu_svm *svm = to_svm(vcpu);
2240
2241         /*
2242          * The vmcb page can be recycled, causing a false negative in
2243          * svm_vcpu_load(). So, ensure that no logical CPU has this
2244          * vmcb page recorded as its current vmcb.
2245          */
2246         svm_clear_current_vmcb(svm->vmcb);
2247
2248         __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2249         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2250         __free_page(virt_to_page(svm->nested.hsave));
2251         __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2252         kvm_vcpu_uninit(vcpu);
2253         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2254         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2255         kmem_cache_free(kvm_vcpu_cache, svm);
2256 }
2257
2258 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2259 {
2260         struct vcpu_svm *svm = to_svm(vcpu);
2261         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2262         int i;
2263
2264         if (unlikely(cpu != vcpu->cpu)) {
2265                 svm->asid_generation = 0;
2266                 mark_all_dirty(svm->vmcb);
2267         }
2268
2269 #ifdef CONFIG_X86_64
2270         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2271 #endif
2272         savesegment(fs, svm->host.fs);
2273         savesegment(gs, svm->host.gs);
2274         svm->host.ldt = kvm_read_ldt();
2275
2276         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2277                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2278
2279         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2280                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2281                 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2282                         __this_cpu_write(current_tsc_ratio, tsc_ratio);
2283                         wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2284                 }
2285         }
2286         /* This assumes that the kernel never uses MSR_TSC_AUX */
2287         if (static_cpu_has(X86_FEATURE_RDTSCP))
2288                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2289
2290         if (sd->current_vmcb != svm->vmcb) {
2291                 sd->current_vmcb = svm->vmcb;
2292                 indirect_branch_prediction_barrier();
2293         }
2294         avic_vcpu_load(vcpu, cpu);
2295 }
2296
2297 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2298 {
2299         struct vcpu_svm *svm = to_svm(vcpu);
2300         int i;
2301
2302         avic_vcpu_put(vcpu);
2303
2304         ++vcpu->stat.host_state_reload;
2305         kvm_load_ldt(svm->host.ldt);
2306 #ifdef CONFIG_X86_64
2307         loadsegment(fs, svm->host.fs);
2308         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
2309         load_gs_index(svm->host.gs);
2310 #else
2311 #ifdef CONFIG_X86_32_LAZY_GS
2312         loadsegment(gs, svm->host.gs);
2313 #endif
2314 #endif
2315         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2316                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2317 }
2318
2319 static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2320 {
2321         avic_set_running(vcpu, false);
2322 }
2323
2324 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2325 {
2326         avic_set_running(vcpu, true);
2327 }
2328
2329 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2330 {
2331         struct vcpu_svm *svm = to_svm(vcpu);
2332         unsigned long rflags = svm->vmcb->save.rflags;
2333
2334         if (svm->nmi_singlestep) {
2335                 /* Hide our flags if they were not set by the guest */
2336                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2337                         rflags &= ~X86_EFLAGS_TF;
2338                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2339                         rflags &= ~X86_EFLAGS_RF;
2340         }
2341         return rflags;
2342 }
2343
2344 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2345 {
2346         if (to_svm(vcpu)->nmi_singlestep)
2347                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2348
2349        /*
2350         * Any change of EFLAGS.VM is accompanied by a reload of SS
2351         * (caused by either a task switch or an inter-privilege IRET),
2352         * so we do not need to update the CPL here.
2353         */
2354         to_svm(vcpu)->vmcb->save.rflags = rflags;
2355 }
2356
2357 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2358 {
2359         switch (reg) {
2360         case VCPU_EXREG_PDPTR:
2361                 BUG_ON(!npt_enabled);
2362                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
2363                 break;
2364         default:
2365                 BUG();
2366         }
2367 }
2368
2369 static void svm_set_vintr(struct vcpu_svm *svm)
2370 {
2371         set_intercept(svm, INTERCEPT_VINTR);
2372 }
2373
2374 static void svm_clear_vintr(struct vcpu_svm *svm)
2375 {
2376         clr_intercept(svm, INTERCEPT_VINTR);
2377 }
2378
2379 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2380 {
2381         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2382
2383         switch (seg) {
2384         case VCPU_SREG_CS: return &save->cs;
2385         case VCPU_SREG_DS: return &save->ds;
2386         case VCPU_SREG_ES: return &save->es;
2387         case VCPU_SREG_FS: return &save->fs;
2388         case VCPU_SREG_GS: return &save->gs;
2389         case VCPU_SREG_SS: return &save->ss;
2390         case VCPU_SREG_TR: return &save->tr;
2391         case VCPU_SREG_LDTR: return &save->ldtr;
2392         }
2393         BUG();
2394         return NULL;
2395 }
2396
2397 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2398 {
2399         struct vmcb_seg *s = svm_seg(vcpu, seg);
2400
2401         return s->base;
2402 }
2403
2404 static void svm_get_segment(struct kvm_vcpu *vcpu,
2405                             struct kvm_segment *var, int seg)
2406 {
2407         struct vmcb_seg *s = svm_seg(vcpu, seg);
2408
2409         var->base = s->base;
2410         var->limit = s->limit;
2411         var->selector = s->selector;
2412         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2413         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2414         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2415         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2416         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2417         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2418         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
2419
2420         /*
2421          * AMD CPUs circa 2014 track the G bit for all segments except CS.
2422          * However, the SVM spec states that the G bit is not observed by the
2423          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2424          * So let's synthesize a legal G bit for all segments, this helps
2425          * running KVM nested. It also helps cross-vendor migration, because
2426          * Intel's vmentry has a check on the 'G' bit.
2427          */
2428         var->g = s->limit > 0xfffff;
2429
2430         /*
2431          * AMD's VMCB does not have an explicit unusable field, so emulate it
2432          * for cross-vendor migration purposes as the inverse of the present bit.
2433          */
2434         var->unusable = !var->present;
2435
2436         switch (seg) {
2437         case VCPU_SREG_TR:
2438                 /*
2439                  * Work around a bug where the busy flag in the TR selector
2440                  * isn't exposed.
2441                  */
2442                 var->type |= 0x2;
2443                 break;
2444         case VCPU_SREG_DS:
2445         case VCPU_SREG_ES:
2446         case VCPU_SREG_FS:
2447         case VCPU_SREG_GS:
2448                 /*
2449                  * The accessed bit must always be set in the segment
2450                  * descriptor cache: although it can be cleared in the
2451                  * descriptor, the cached bit always remains 1. Since
2452                  * Intel has a check on this, set it here to support
2453                  * cross-vendor migration.
2454                  */
2455                 if (!var->unusable)
2456                         var->type |= 0x1;
2457                 break;
2458         case VCPU_SREG_SS:
2459                 /*
2460                  * On AMD CPUs sometimes the DB bit in the segment
2461                  * descriptor is left as 1, although the whole segment has
2462                  * been made unusable. Clear it here to pass an Intel VMX
2463                  * entry check when cross vendor migrating.
2464                  */
2465                 if (var->unusable)
2466                         var->db = 0;
2467                 /* This is symmetric with svm_set_segment() */
2468                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
2469                 break;
2470         }
2471 }
2472
2473 static int svm_get_cpl(struct kvm_vcpu *vcpu)
2474 {
2475         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2476
2477         return save->cpl;
2478 }
2479
2480 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2481 {
2482         struct vcpu_svm *svm = to_svm(vcpu);
2483
2484         dt->size = svm->vmcb->save.idtr.limit;
2485         dt->address = svm->vmcb->save.idtr.base;
2486 }
2487
2488 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2489 {
2490         struct vcpu_svm *svm = to_svm(vcpu);
2491
2492         svm->vmcb->save.idtr.limit = dt->size;
2493         svm->vmcb->save.idtr.base = dt->address;
2494         mark_dirty(svm->vmcb, VMCB_DT);
2495 }
2496
2497 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2498 {
2499         struct vcpu_svm *svm = to_svm(vcpu);
2500
2501         dt->size = svm->vmcb->save.gdtr.limit;
2502         dt->address = svm->vmcb->save.gdtr.base;
2503 }
2504
2505 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2506 {
2507         struct vcpu_svm *svm = to_svm(vcpu);
2508
2509         svm->vmcb->save.gdtr.limit = dt->size;
2510         svm->vmcb->save.gdtr.base = dt->address;
2511         mark_dirty(svm->vmcb, VMCB_DT);
2512 }
2513
2514 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2515 {
2516 }
2517
2518 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2519 {
2520 }
2521
2522 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
2523 {
2524 }
2525
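/*
 * Drop the CR0 read/write intercepts when the guest-visible CR0 matches
 * the value the hardware uses in the selectively-intercepted bits;
 * otherwise keep intercepting so the difference can be emulated.
 */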
2526 static void update_cr0_intercept(struct vcpu_svm *svm)
2527 {
2528         ulong gcr0 = svm->vcpu.arch.cr0;
2529         u64 *hcr0 = &svm->vmcb->save.cr0;
2530
2531         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2532                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
2533
2534         mark_dirty(svm->vmcb, VMCB_CR);
2535
2536         if (gcr0 == *hcr0) {
2537                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2538                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2539         } else {
2540                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2541                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2542         }
2543 }
2544
2545 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2546 {
2547         struct vcpu_svm *svm = to_svm(vcpu);
2548
2549 #ifdef CONFIG_X86_64
2550         if (vcpu->arch.efer & EFER_LME) {
2551                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
2552                         vcpu->arch.efer |= EFER_LMA;
2553                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
2554                 }
2555
2556                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
2557                         vcpu->arch.efer &= ~EFER_LMA;
2558                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
2559                 }
2560         }
2561 #endif
2562         vcpu->arch.cr0 = cr0;
2563
2564         if (!npt_enabled)
2565                 cr0 |= X86_CR0_PG | X86_CR0_WP;
2566
2567         /*
2568          * Re-enable caching here because the QEMU BIOS
2569          * does not do it - otherwise this results in some delay at
2570          * reboot.
2571          */
2572         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2573                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
2574         svm->vmcb->save.cr0 = cr0;
2575         mark_dirty(svm->vmcb, VMCB_CR);
2576         update_cr0_intercept(svm);
2577 }
2578
2579 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2580 {
2581         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
2582         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2583
2584         if (cr4 & X86_CR4_VMXE)
2585                 return 1;
2586
2587         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
2588                 svm_flush_tlb(vcpu, true);
2589
2590         vcpu->arch.cr4 = cr4;
2591         if (!npt_enabled)
2592                 cr4 |= X86_CR4_PAE;
2593         cr4 |= host_cr4_mce;
2594         to_svm(vcpu)->vmcb->save.cr4 = cr4;
2595         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
2596         return 0;
2597 }
2598
2599 static void svm_set_segment(struct kvm_vcpu *vcpu,
2600                             struct kvm_segment *var, int seg)
2601 {
2602         struct vcpu_svm *svm = to_svm(vcpu);
2603         struct vmcb_seg *s = svm_seg(vcpu, seg);
2604
2605         s->base = var->base;
2606         s->limit = var->limit;
2607         s->selector = var->selector;
2608         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2609         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2610         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2611         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2612         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2613         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2614         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2615         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
2616
2617         /*
2618          * This is always accurate, except if SYSRET returned to a segment
2619          * with SS.DPL != 3.  Intel does not have this quirk, and always
2620          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2621          * would entail passing the CPL to userspace and back.
2622          */
2623         if (seg == VCPU_SREG_SS)
2624                 /* This is symmetric with svm_get_segment() */
2625                 svm->vmcb->save.cpl = (var->dpl & 3);
2626
2627         mark_dirty(svm->vmcb, VMCB_SEG);
2628 }
2629
2630 static void update_bp_intercept(struct kvm_vcpu *vcpu)
2631 {
2632         struct vcpu_svm *svm = to_svm(vcpu);
2633
2634         clr_exception_intercept(svm, BP_VECTOR);
2635
2636         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
2637                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2638                         set_exception_intercept(svm, BP_VECTOR);
2639         } else
2640                 vcpu->guest_debug = 0;
2641 }
2642
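/*
 * Assign the next free ASID on this physical CPU.  When the pool is
 * exhausted, start a new generation from min_asid and request a full
 * TLB flush of all ASIDs.
 */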
2643 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
2644 {
2645         if (sd->next_asid > sd->max_asid) {
2646                 ++sd->asid_generation;
2647                 sd->next_asid = sd->min_asid;
2648                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2649         }
2650
2651         svm->asid_generation = sd->asid_generation;
2652         svm->vmcb->control.asid = sd->next_asid++;
2653
2654         mark_dirty(svm->vmcb, VMCB_ASID);
2655 }
2656
2657 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2658 {
2659         return to_svm(vcpu)->vmcb->save.dr6;
2660 }
2661
2662 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2663 {
2664         struct vcpu_svm *svm = to_svm(vcpu);
2665
2666         svm->vmcb->save.dr6 = value;
2667         mark_dirty(svm->vmcb, VMCB_DR);
2668 }
2669
2670 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2671 {
2672         struct vcpu_svm *svm = to_svm(vcpu);
2673
2674         get_debugreg(vcpu->arch.db[0], 0);
2675         get_debugreg(vcpu->arch.db[1], 1);
2676         get_debugreg(vcpu->arch.db[2], 2);
2677         get_debugreg(vcpu->arch.db[3], 3);
2678         vcpu->arch.dr6 = svm_get_dr6(vcpu);
2679         vcpu->arch.dr7 = svm->vmcb->save.dr7;
2680
2681         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2682         set_dr_intercepts(svm);
2683 }
2684
2685 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2686 {
2687         struct vcpu_svm *svm = to_svm(vcpu);
2688
2689         svm->vmcb->save.dr7 = value;
2690         mark_dirty(svm->vmcb, VMCB_DR);
2691 }
2692
2693 static int pf_interception(struct vcpu_svm *svm)
2694 {
2695         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2696         u64 error_code = svm->vmcb->control.exit_info_1;
2697
2698         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2699                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2700                         svm->vmcb->control.insn_bytes : NULL,
2701                         svm->vmcb->control.insn_len);
2702 }
2703
2704 static int npf_interception(struct vcpu_svm *svm)
2705 {
2706         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2707         u64 error_code = svm->vmcb->control.exit_info_1;
2708
2709         trace_kvm_page_fault(fault_address, error_code);
2710         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2711                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2712                         svm->vmcb->control.insn_bytes : NULL,
2713                         svm->vmcb->control.insn_len);
2714 }
2715
2716 static int db_interception(struct vcpu_svm *svm)
2717 {
2718         struct kvm_run *kvm_run = svm->vcpu.run;
2719         struct kvm_vcpu *vcpu = &svm->vcpu;
2720
2721         if (!(svm->vcpu.guest_debug &
2722               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2723                 !svm->nmi_singlestep) {
2724                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2725                 return 1;
2726         }
2727
2728         if (svm->nmi_singlestep) {
2729                 disable_nmi_singlestep(svm);
2730                 /* Make sure we check for pending NMIs upon entry */
2731                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2732         }
2733
2734         if (svm->vcpu.guest_debug &
2735             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2736                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2737                 kvm_run->debug.arch.pc =
2738                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2739                 kvm_run->debug.arch.exception = DB_VECTOR;
2740                 return 0;
2741         }
2742
2743         return 1;
2744 }
2745
2746 static int bp_interception(struct vcpu_svm *svm)
2747 {
2748         struct kvm_run *kvm_run = svm->vcpu.run;
2749
2750         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2751         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2752         kvm_run->debug.arch.exception = BP_VECTOR;
2753         return 0;
2754 }
2755
2756 static int ud_interception(struct vcpu_svm *svm)
2757 {
2758         return handle_ud(&svm->vcpu);
2759 }
2760
2761 static int ac_interception(struct vcpu_svm *svm)
2762 {
2763         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2764         return 1;
2765 }
2766
2767 static int gp_interception(struct vcpu_svm *svm)
2768 {
2769         struct kvm_vcpu *vcpu = &svm->vcpu;
2770         u32 error_code = svm->vmcb->control.exit_info_1;
2771
2772         WARN_ON_ONCE(!enable_vmware_backdoor);
2773
2774         /*
2775          * VMware backdoor emulation on #GP interception only handles IN{S},
2776          * OUT{S}, and RDPMC, none of which generate a non-zero error code.
2777          */
2778         if (error_code) {
2779                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2780                 return 1;
2781         }
2782         return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP) !=
2783                                                 EMULATE_USER_EXIT;
2784 }
2785
2786 static bool is_erratum_383(void)
2787 {
2788         int err, i;
2789         u64 value;
2790
2791         if (!erratum_383_found)
2792                 return false;
2793
2794         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2795         if (err)
2796                 return false;
2797
2798         /* Bit 62 may or may not be set for this mce */
2799         value &= ~(1ULL << 62);
2800
2801         if (value != 0xb600000000010015ULL)
2802                 return false;
2803
2804         /* Clear MCi_STATUS registers */
2805         for (i = 0; i < 6; ++i)
2806                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2807
2808         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2809         if (!err) {
2810                 u32 low, high;
2811
2812                 value &= ~(1ULL << 2);
2813                 low    = lower_32_bits(value);
2814                 high   = upper_32_bits(value);
2815
2816                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2817         }
2818
2819         /* Flush tlb to evict multi-match entries */
2820         __flush_tlb_all();
2821
2822         return true;
2823 }
2824
2825 static void svm_handle_mce(struct vcpu_svm *svm)
2826 {
2827         if (is_erratum_383()) {
2828                 /*
2829                  * Erratum 383 triggered. Guest state is corrupt so kill the
2830                  * guest.
2831                  */
2832                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2833
2834                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
2835
2836                 return;
2837         }
2838
2839         /*
2840          * On an #MC intercept the MCE handler is not called automatically in
2841          * the host. So do it by hand here.
2842          */
2843         asm volatile (
2844                 "int $0x12\n");
2845         /* not sure if we ever come back to this point */
2846
2847         return;
2848 }
2849
2850 static int mc_interception(struct vcpu_svm *svm)
2851 {
2852         return 1;
2853 }
2854
2855 static int shutdown_interception(struct vcpu_svm *svm)
2856 {
2857         struct kvm_run *kvm_run = svm->vcpu.run;
2858
2859         /*
2860          * VMCB is undefined after a SHUTDOWN intercept
2861          * so reinitialize it.
2862          */
2863         clear_page(svm->vmcb);
2864         init_vmcb(svm);
2865
2866         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2867         return 0;
2868 }
2869
2870 static int io_interception(struct vcpu_svm *svm)
2871 {
2872         struct kvm_vcpu *vcpu = &svm->vcpu;
2873         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2874         int size, in, string;
2875         unsigned port;
2876
2877         ++svm->vcpu.stat.io_exits;
2878         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2879         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2880         if (string)
2881                 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
2882
2883         port = io_info >> 16;
2884         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2885         svm->next_rip = svm->vmcb->control.exit_info_2;
2886
2887         return kvm_fast_pio(&svm->vcpu, size, port, in);
2888 }
2889
2890 static int nmi_interception(struct vcpu_svm *svm)
2891 {
2892         return 1;
2893 }
2894
2895 static int intr_interception(struct vcpu_svm *svm)
2896 {
2897         ++svm->vcpu.stat.irq_exits;
2898         return 1;
2899 }
2900
2901 static int nop_on_interception(struct vcpu_svm *svm)
2902 {
2903         return 1;
2904 }
2905
2906 static int halt_interception(struct vcpu_svm *svm)
2907 {
2908         return kvm_emulate_halt(&svm->vcpu);
2909 }
2910
2911 static int vmmcall_interception(struct vcpu_svm *svm)
2912 {
2913         return kvm_emulate_hypercall(&svm->vcpu);
2914 }
2915
2916 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2917 {
2918         struct vcpu_svm *svm = to_svm(vcpu);
2919
2920         return svm->nested.nested_cr3;
2921 }
2922
2923 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2924 {
2925         struct vcpu_svm *svm = to_svm(vcpu);
2926         u64 cr3 = svm->nested.nested_cr3;
2927         u64 pdpte;
2928         int ret;
2929
2930         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
2931                                        offset_in_page(cr3) + index * 8, 8);
2932         if (ret)
2933                 return 0;
2934         return pdpte;
2935 }
2936
2937 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2938                                    unsigned long root)
2939 {
2940         struct vcpu_svm *svm = to_svm(vcpu);
2941
2942         svm->vmcb->control.nested_cr3 = __sme_set(root);
2943         mark_dirty(svm->vmcb, VMCB_NPT);
2944 }
2945
2946 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2947                                        struct x86_exception *fault)
2948 {
2949         struct vcpu_svm *svm = to_svm(vcpu);
2950
2951         if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2952                 /*
2953                  * TODO: track the cause of the nested page fault, and
2954                  * correctly fill in the high bits of exit_info_1.
2955                  */
2956                 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2957                 svm->vmcb->control.exit_code_hi = 0;
2958                 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2959                 svm->vmcb->control.exit_info_2 = fault->address;
2960         }
2961
2962         svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2963         svm->vmcb->control.exit_info_1 |= fault->error_code;
2964
2965         /*
2966          * The present bit is always zero for page structure faults on real
2967          * hardware.
2968          */
2969         if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2970                 svm->vmcb->control.exit_info_1 &= ~1;
2971
2972         nested_svm_vmexit(svm);
2973 }
2974
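/*
 * Point the vcpu at the nested MMU: CR3 and the PDPTRs are read from
 * L1's nested_cr3, nested page faults are reflected back to L1, and
 * walk_mmu is switched to the nested MMU for L2 address translation.
 */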
2975 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
2976 {
2977         WARN_ON(mmu_is_nested(vcpu));
2978
2979         vcpu->arch.mmu = &vcpu->arch.guest_mmu;
2980         kvm_init_shadow_mmu(vcpu);
2981         vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
2982         vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
2983         vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
2984         vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
2985         vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
2986         reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
2987         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
2988 }
2989
2990 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2991 {
2992         vcpu->arch.mmu = &vcpu->arch.root_mmu;
2993         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
2994 }
2995
2996 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2997 {
2998         if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2999             !is_paging(&svm->vcpu)) {
3000                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3001                 return 1;
3002         }
3003
3004         if (svm->vmcb->save.cpl) {
3005                 kvm_inject_gp(&svm->vcpu, 0);
3006                 return 1;
3007         }
3008
3009         return 0;
3010 }
3011
3012 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
3013                                       bool has_error_code, u32 error_code)
3014 {
3015         int vmexit;
3016
3017         if (!is_guest_mode(&svm->vcpu))
3018                 return 0;
3019
3020         vmexit = nested_svm_intercept(svm);
3021         if (vmexit != NESTED_EXIT_DONE)
3022                 return 0;
3023
3024         svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
3025         svm->vmcb->control.exit_code_hi = 0;
3026         svm->vmcb->control.exit_info_1 = error_code;
3027
3028         /*
3029          * EXITINFO2 is undefined for all exception intercepts other
3030          * than #PF.
3031          */
3032         if (svm->vcpu.arch.exception.nested_apf)
3033                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
3034         else if (svm->vcpu.arch.exception.has_payload)
3035                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
3036         else
3037                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
3038
3039         svm->nested.exit_required = true;
3040         return vmexit;
3041 }
3042
3043 /* This function returns true if it is safe to enable the IRQ window */
3044 static inline bool nested_svm_intr(struct vcpu_svm *svm)
3045 {
3046         if (!is_guest_mode(&svm->vcpu))
3047                 return true;
3048
3049         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3050                 return true;
3051
3052         if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
3053                 return false;
3054
3055         /*
3056          * If a vmexit was already requested (by an intercepted exception,
3057          * for instance), do not overwrite it with an "external interrupt"
3058          * vmexit.
3059          */
3060         if (svm->nested.exit_required)
3061                 return false;
3062
3063         svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
3064         svm->vmcb->control.exit_info_1 = 0;
3065         svm->vmcb->control.exit_info_2 = 0;
3066
3067         if (svm->nested.intercept & 1ULL) {
3068                 /*
3069                  * The #vmexit can't be emulated here directly because this
3070                  * code path runs with IRQs and preemption disabled. A
3071                  * #vmexit emulation might sleep. Only signal a request for
3072                  * the #vmexit here.
3073                  */
3074                 svm->nested.exit_required = true;
3075                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
3076                 return false;
3077         }
3078
3079         return true;
3080 }
3081
3082 /* This function returns true if it is safe to enable the NMI window */
3083 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
3084 {
3085         if (!is_guest_mode(&svm->vcpu))
3086                 return true;
3087
3088         if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
3089                 return true;
3090
3091         svm->vmcb->control.exit_code = SVM_EXIT_NMI;
3092         svm->nested.exit_required = true;
3093
3094         return false;
3095 }
3096
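/*
 * Consult L1's I/O permission bitmap to decide whether the intercepted
 * port access must be forwarded to the nested hypervisor.
 */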
3097 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
3098 {
3099         unsigned port, size, iopm_len;
3100         u16 val, mask;
3101         u8 start_bit;
3102         u64 gpa;
3103
3104         if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
3105                 return NESTED_EXIT_HOST;
3106
3107         port = svm->vmcb->control.exit_info_1 >> 16;
3108         size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
3109                 SVM_IOIO_SIZE_SHIFT;
3110         gpa  = svm->nested.vmcb_iopm + (port / 8);
3111         start_bit = port % 8;
3112         iopm_len = (start_bit + size > 8) ? 2 : 1;
3113         mask = (0xf >> (4 - size)) << start_bit;
3114         val = 0;
3115
3116         if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
3117                 return NESTED_EXIT_DONE;
3118
3119         return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3120 }
3121
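/*
 * Consult L1's MSR permission bitmap to decide whether the intercepted
 * MSR access must be forwarded to the nested hypervisor.
 */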
3122 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
3123 {
3124         u32 offset, msr, value;
3125         int write, mask;
3126
3127         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3128                 return NESTED_EXIT_HOST;
3129
3130         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3131         offset = svm_msrpm_offset(msr);
3132         write  = svm->vmcb->control.exit_info_1 & 1;
3133         mask   = 1 << ((2 * (msr & 0xf)) + write);
3134
3135         if (offset == MSR_INVALID)
3136                 return NESTED_EXIT_DONE;
3137
3138         /* Offset is in 32-bit units but we need it in 8-bit units */
3139         offset *= 4;
3140
3141         if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
3142                 return NESTED_EXIT_DONE;
3143
3144         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3145 }
3146
3147 /* DB exceptions for our internal use must not cause vmexit */
3148 static int nested_svm_intercept_db(struct vcpu_svm *svm)
3149 {
3150         unsigned long dr6;
3151
3152         /* if we're not singlestepping, it's not ours */
3153         if (!svm->nmi_singlestep)
3154                 return NESTED_EXIT_DONE;
3155
3156         /* if it's not a singlestep exception, it's not ours */
3157         if (kvm_get_dr(&svm->vcpu, 6, &dr6))
3158                 return NESTED_EXIT_DONE;
3159         if (!(dr6 & DR6_BS))
3160                 return NESTED_EXIT_DONE;
3161
3162         /* if the guest is singlestepping, it should get the vmexit */
3163         if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
3164                 disable_nmi_singlestep(svm);
3165                 return NESTED_EXIT_DONE;
3166         }
3167
3168         /* it's ours, the nested hypervisor must not see this one */
3169         return NESTED_EXIT_HOST;
3170 }
3171
3172 static int nested_svm_exit_special(struct vcpu_svm *svm)
3173 {
3174         u32 exit_code = svm->vmcb->control.exit_code;
3175
3176         switch (exit_code) {
3177         case SVM_EXIT_INTR:
3178         case SVM_EXIT_NMI:
3179         case SVM_EXIT_EXCP_BASE + MC_VECTOR:
3180                 return NESTED_EXIT_HOST;
3181         case SVM_EXIT_NPF:
3182                 /* For now we are always handling NPFs when using them */
3183                 if (npt_enabled)
3184                         return NESTED_EXIT_HOST;
3185                 break;
3186         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
3187                 /* When we're shadowing, trap PFs, but not async PF */
3188                 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
3189                         return NESTED_EXIT_HOST;
3190                 break;
3191         default:
3192                 break;
3193         }
3194
3195         return NESTED_EXIT_CONTINUE;
3196 }
3197
3198 /*
3199  * If this function returns true, this #vmexit needs to be handled by the nested hypervisor
3200  */
3201 static int nested_svm_intercept(struct vcpu_svm *svm)
3202 {
3203         u32 exit_code = svm->vmcb->control.exit_code;
3204         int vmexit = NESTED_EXIT_HOST;
3205
3206         switch (exit_code) {
3207         case SVM_EXIT_MSR:
3208                 vmexit = nested_svm_exit_handled_msr(svm);
3209                 break;
3210         case SVM_EXIT_IOIO:
3211                 vmexit = nested_svm_intercept_ioio(svm);
3212                 break;
3213         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
3214                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
3215                 if (svm->nested.intercept_cr & bit)
3216                         vmexit = NESTED_EXIT_DONE;
3217                 break;
3218         }
3219         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
3220                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
3221                 if (svm->nested.intercept_dr & bit)
3222                         vmexit = NESTED_EXIT_DONE;
3223                 break;
3224         }
3225         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
3226                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
3227                 if (svm->nested.intercept_exceptions & excp_bits) {
3228                         if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
3229                                 vmexit = nested_svm_intercept_db(svm);
3230                         else
3231                                 vmexit = NESTED_EXIT_DONE;
3232                 }
3233                 /* async page faults always cause a vmexit */
3234                 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
3235                          svm->vcpu.arch.exception.nested_apf != 0)
3236                         vmexit = NESTED_EXIT_DONE;
3237                 break;
3238         }
3239         case SVM_EXIT_ERR: {
3240                 vmexit = NESTED_EXIT_DONE;
3241                 break;
3242         }
3243         default: {
3244                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
3245                 if (svm->nested.intercept & exit_bits)
3246                         vmexit = NESTED_EXIT_DONE;
3247         }
3248         }
3249
3250         return vmexit;
3251 }
3252
3253 static int nested_svm_exit_handled(struct vcpu_svm *svm)
3254 {
3255         int vmexit;
3256
3257         vmexit = nested_svm_intercept(svm);
3258
3259         if (vmexit == NESTED_EXIT_DONE)
3260                 nested_svm_vmexit(svm);
3261
3262         return vmexit;
3263 }
3264
3265 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3266 {
3267         struct vmcb_control_area *dst  = &dst_vmcb->control;
3268         struct vmcb_control_area *from = &from_vmcb->control;
3269
3270         dst->intercept_cr         = from->intercept_cr;
3271         dst->intercept_dr         = from->intercept_dr;
3272         dst->intercept_exceptions = from->intercept_exceptions;
3273         dst->intercept            = from->intercept;
3274         dst->iopm_base_pa         = from->iopm_base_pa;
3275         dst->msrpm_base_pa        = from->msrpm_base_pa;
3276         dst->tsc_offset           = from->tsc_offset;
3277         dst->asid                 = from->asid;
3278         dst->tlb_ctl              = from->tlb_ctl;
3279         dst->int_ctl              = from->int_ctl;
3280         dst->int_vector           = from->int_vector;
3281         dst->int_state            = from->int_state;
3282         dst->exit_code            = from->exit_code;
3283         dst->exit_code_hi         = from->exit_code_hi;
3284         dst->exit_info_1          = from->exit_info_1;
3285         dst->exit_info_2          = from->exit_info_2;
3286         dst->exit_int_info        = from->exit_int_info;
3287         dst->exit_int_info_err    = from->exit_int_info_err;
3288         dst->nested_ctl           = from->nested_ctl;
3289         dst->event_inj            = from->event_inj;
3290         dst->event_inj_err        = from->event_inj_err;
3291         dst->nested_cr3           = from->nested_cr3;
3292         dst->virt_ext             = from->virt_ext;
3293         dst->pause_filter_count   = from->pause_filter_count;
3294         dst->pause_filter_thresh  = from->pause_filter_thresh;
3295 }
3296
3297 static int nested_svm_vmexit(struct vcpu_svm *svm)
3298 {
3299         int rc;
3300         struct vmcb *nested_vmcb;
3301         struct vmcb *hsave = svm->nested.hsave;
3302         struct vmcb *vmcb = svm->vmcb;
3303         struct kvm_host_map map;
3304
3305         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3306                                        vmcb->control.exit_info_1,
3307                                        vmcb->control.exit_info_2,
3308                                        vmcb->control.exit_int_info,
3309                                        vmcb->control.exit_int_info_err,
3310                                        KVM_ISA_SVM);
3311
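        /*
         * Map L1's VMCB so the exit state of the nested guest can be
         * written back to it below.
         */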
3312         rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
3313         if (rc) {
3314                 if (rc == -EINVAL)
3315                         kvm_inject_gp(&svm->vcpu, 0);
3316                 return 1;
3317         }
3318
3319         nested_vmcb = map.hva;
3320
3321         /* Exit Guest-Mode */
3322         leave_guest_mode(&svm->vcpu);
3323         svm->nested.vmcb = 0;
3324
3325         /* Give the current vmcb to the guest */
3326         disable_gif(svm);
3327
3328         nested_vmcb->save.es     = vmcb->save.es;
3329         nested_vmcb->save.cs     = vmcb->save.cs;
3330         nested_vmcb->save.ss     = vmcb->save.ss;
3331         nested_vmcb->save.ds     = vmcb->save.ds;
3332         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
3333         nested_vmcb->save.idtr   = vmcb->save.idtr;
3334         nested_vmcb->save.efer   = svm->vcpu.arch.efer;
3335         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
3336         nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
3337         nested_vmcb->save.cr2    = vmcb->save.cr2;
3338         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
3339         nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
3340         nested_vmcb->save.rip    = vmcb->save.rip;
3341         nested_vmcb->save.rsp    = vmcb->save.rsp;
3342         nested_vmcb->save.rax    = vmcb->save.rax;
3343         nested_vmcb->save.dr7    = vmcb->save.dr7;
3344         nested_vmcb->save.dr6    = vmcb->save.dr6;
3345         nested_vmcb->save.cpl    = vmcb->save.cpl;
3346
3347         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
3348         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
3349         nested_vmcb->control.int_state         = vmcb->control.int_state;
3350         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
3351         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
3352         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
3353         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
3354         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
3355         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
3356
3357         if (svm->nrips_enabled)
3358                 nested_vmcb->control.next_rip  = vmcb->control.next_rip;
3359
3360         /*
3361          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3362          * to make sure that we do not lose injected events. So check event_inj
3363          * here and copy it to exit_int_info if it is valid.
3364          * Exit_int_info and event_inj can't both be valid because the case
3365          * below only happens on a VMRUN instruction intercept which has
3366          * no valid exit_int_info set.
3367          */
3368         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3369                 struct vmcb_control_area *nc = &nested_vmcb->control;
3370
3371                 nc->exit_int_info     = vmcb->control.event_inj;
3372                 nc->exit_int_info_err = vmcb->control.event_inj_err;
3373         }
3374
3375         nested_vmcb->control.tlb_ctl           = 0;
3376         nested_vmcb->control.event_inj         = 0;
3377         nested_vmcb->control.event_inj_err     = 0;
3378
3379         nested_vmcb->control.pause_filter_count =
3380                 svm->vmcb->control.pause_filter_count;
3381         nested_vmcb->control.pause_filter_thresh =
3382                 svm->vmcb->control.pause_filter_thresh;
3383
3384         /* We always set V_INTR_MASKING and remember the old value in hflags */
3385         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3386                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3387
3388         /* Restore the original control entries */
3389         copy_vmcb_control_area(vmcb, hsave);
3390
3391         svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
3392         kvm_clear_exception_queue(&svm->vcpu);
3393         kvm_clear_interrupt_queue(&svm->vcpu);
3394
3395         svm->nested.nested_cr3 = 0;
3396
3397         /* Restore selected save entries */
3398         svm->vmcb->save.es = hsave->save.es;
3399         svm->vmcb->save.cs = hsave->save.cs;
3400         svm->vmcb->save.ss = hsave->save.ss;
3401         svm->vmcb->save.ds = hsave->save.ds;
3402         svm->vmcb->save.gdtr = hsave->save.gdtr;
3403         svm->vmcb->save.idtr = hsave->save.idtr;
3404         kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
3405         svm_set_efer(&svm->vcpu, hsave->save.efer);
3406         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3407         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3408         if (npt_enabled) {
3409                 svm->vmcb->save.cr3 = hsave->save.cr3;
3410                 svm->vcpu.arch.cr3 = hsave->save.cr3;
3411         } else {
3412                 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
3413         }
3414         kvm_rax_write(&svm->vcpu, hsave->save.rax);
3415         kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
3416         kvm_rip_write(&svm->vcpu, hsave->save.rip);
3417         svm->vmcb->save.dr7 = 0;
3418         svm->vmcb->save.cpl = 0;
3419         svm->vmcb->control.exit_int_info = 0;
3420
3421         mark_all_dirty(svm->vmcb);
3422
3423         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3424
3425         nested_svm_uninit_mmu_context(&svm->vcpu);
3426         kvm_mmu_reset_context(&svm->vcpu);
3427         kvm_mmu_load(&svm->vcpu);
3428
3429         /*
3430          * Drop what we picked up for L2 via svm_complete_interrupts() so it
3431          * doesn't end up in L1.
3432          */
3433         svm->vcpu.arch.nmi_injected = false;
3434         kvm_clear_exception_queue(&svm->vcpu);
3435         kvm_clear_interrupt_queue(&svm->vcpu);
3436
3437         return 0;
3438 }
3439
3440 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
3441 {
3442         /*
3443          * This function merges the msr permission bitmaps of kvm and the
3444          * nested vmcb. It is optimized in that it only merges the parts where
3445          * the kvm msr permission bitmap may contain zero bits
3446          */
3447         int i;
3448
3449         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3450                 return true;
3451
3452         for (i = 0; i < MSRPM_OFFSETS; i++) {
3453                 u32 value, p;
3454                 u64 offset;
3455
3456                 if (msrpm_offsets[i] == 0xffffffff)
3457                         break;
3458
3459                 p      = msrpm_offsets[i];
3460                 offset = svm->nested.vmcb_msrpm + (p * 4);
3461
3462                 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
3463                         return false;
3464
3465                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3466         }
3467
3468         svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
3469
3470         return true;
3471 }
3472
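/*
 * Basic consistency checks on the VMCB provided by L1: the VMRUN intercept
 * must be set, the ASID must be non-zero, and nested paging may only be
 * requested when NPT is enabled on the host.
 */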
3473 static bool nested_vmcb_checks(struct vmcb *vmcb)
3474 {
3475         if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3476                 return false;
3477
3478         if (vmcb->control.asid == 0)
3479                 return false;
3480
3481         if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3482             !npt_enabled)
3483                 return false;
3484
3485         return true;
3486 }
3487
3488 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3489                                  struct vmcb *nested_vmcb, struct kvm_host_map *map)
3490 {
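        /* Record whether interrupts were enabled in L1 (EFLAGS.IF) before running the nested guest. */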
3491         if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
3492                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3493         else
3494                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3495
3496         if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
3497                 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3498                 nested_svm_init_mmu_context(&svm->vcpu);
3499         }
3500
3501         /* Load the nested guest state */
3502         svm->vmcb->save.es = nested_vmcb->save.es;
3503         svm->vmcb->save.cs = nested_vmcb->save.cs;
3504         svm->vmcb->save.ss = nested_vmcb->save.ss;
3505         svm->vmcb->save.ds = nested_vmcb->save.ds;
3506         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3507         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
3508         kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
3509         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3510         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3511         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3512         if (npt_enabled) {
3513                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3514                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
3515         } else
3516                 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
3517
3518         /* Guest paging mode is active - reset mmu */
3519         kvm_mmu_reset_context(&svm->vcpu);
3520
3521         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3522         kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
3523         kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
3524         kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
3525
3526         /* In case we don't even reach vcpu_run, the fields are not updated */
3527         svm->vmcb->save.rax = nested_vmcb->save.rax;
3528         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3529         svm->vmcb->save.rip = nested_vmcb->save.rip;
3530         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3531         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3532         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3533
3534         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
3535         svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
3536
3537         /* cache intercepts */
3538         svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
3539         svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
3540         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3541         svm->nested.intercept            = nested_vmcb->control.intercept;
3542
3543         svm_flush_tlb(&svm->vcpu, true);
3544         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3545         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3546                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3547         else
3548                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3549
3550         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3551                 /* We only want the cr8 intercept bits of the guest */
3552                 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3553                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3554         }
3555
3556         /* We don't want to see VMMCALLs from a nested guest */
3557         clr_intercept(svm, INTERCEPT_VMMCALL);
3558
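        /* The nested guest's TSC offset is applied on top of L1's offset. */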
3559         svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
3560         svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
3561
3562         svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3563         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3564         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3565         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3566         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3567
3568         svm->vmcb->control.pause_filter_count =
3569                 nested_vmcb->control.pause_filter_count;
3570         svm->vmcb->control.pause_filter_thresh =
3571                 nested_vmcb->control.pause_filter_thresh;
3572
3573         kvm_vcpu_unmap(&svm->vcpu, map, true);
3574
3575         /* Enter Guest-Mode */
3576         enter_guest_mode(&svm->vcpu);
3577
3578         /*
3579          * Merge guest and host intercepts - must be called with vcpu in
3580          * guest-mode to take effect here
3581          */
3582         recalc_intercepts(svm);
3583
3584         svm->nested.vmcb = vmcb_gpa;
3585
3586         enable_gif(svm);
3587
3588         mark_all_dirty(svm->vmcb);
3589 }
3590
3591 static int nested_svm_vmrun(struct vcpu_svm *svm)
3592 {
3593         int ret;
3594         struct vmcb *nested_vmcb;
3595         struct vmcb *hsave = svm->nested.hsave;
3596         struct vmcb *vmcb = svm->vmcb;
3597         struct kvm_host_map map;
3598         u64 vmcb_gpa;
3599
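        /* The guest physical address of the nested VMCB is passed in RAX. */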
3600         vmcb_gpa = svm->vmcb->save.rax;
3601
3602         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
3603         if (ret == -EINVAL) {
3604                 kvm_inject_gp(&svm->vcpu, 0);
3605                 return 1;
3606         } else if (ret) {
3607                 return kvm_skip_emulated_instruction(&svm->vcpu);
3608         }
3609
3610         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3611
3612         nested_vmcb = map.hva;
3613
3614         if (!nested_vmcb_checks(nested_vmcb)) {
3615                 nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
3616                 nested_vmcb->control.exit_code_hi = 0;
3617                 nested_vmcb->control.exit_info_1  = 0;
3618                 nested_vmcb->control.exit_info_2  = 0;
3619
3620                 kvm_vcpu_unmap(&svm->vcpu, &map, true);
3621
3622                 return ret;
3623         }
3624
3625         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3626                                nested_vmcb->save.rip,
3627                                nested_vmcb->control.int_ctl,
3628                                nested_vmcb->control.event_inj,
3629                                nested_vmcb->control.nested_ctl);
3630
3631         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3632                                     nested_vmcb->control.intercept_cr >> 16,
3633                                     nested_vmcb->control.intercept_exceptions,
3634                                     nested_vmcb->control.intercept);
3635
3636         /* Clear internal status */
3637         kvm_clear_exception_queue(&svm->vcpu);
3638         kvm_clear_interrupt_queue(&svm->vcpu);
3639
3640         /*
3641          * Save the old vmcb, so we don't need to pick what to save, but can
3642          * restore everything when a VMEXIT occurs
3643          */
3644         hsave->save.es     = vmcb->save.es;
3645         hsave->save.cs     = vmcb->save.cs;
3646         hsave->save.ss     = vmcb->save.ss;
3647         hsave->save.ds     = vmcb->save.ds;
3648         hsave->save.gdtr   = vmcb->save.gdtr;
3649         hsave->save.idtr   = vmcb->save.idtr;
3650         hsave->save.efer   = svm->vcpu.arch.efer;
3651         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
3652         hsave->save.cr4    = svm->vcpu.arch.cr4;
3653         hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3654         hsave->save.rip    = kvm_rip_read(&svm->vcpu);
3655         hsave->save.rsp    = vmcb->save.rsp;
3656         hsave->save.rax    = vmcb->save.rax;
3657         if (npt_enabled)
3658                 hsave->save.cr3    = vmcb->save.cr3;
3659         else
3660                 hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
3661
3662         copy_vmcb_control_area(hsave, vmcb);
3663
3664         enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
3665
3666         if (!nested_svm_vmrun_msrpm(svm)) {
3667                 svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
3668                 svm->vmcb->control.exit_code_hi = 0;
3669                 svm->vmcb->control.exit_info_1  = 0;
3670                 svm->vmcb->control.exit_info_2  = 0;
3671
3672                 nested_svm_vmexit(svm);
3673         }
3674
3675         return ret;
3676 }
3677
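/*
 * Copy the state that VMLOAD/VMSAVE transfer between a VMCB and the CPU:
 * FS, GS, TR, LDTR, KERNEL_GS_BASE, the SYSCALL MSRs and the SYSENTER MSRs.
 */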
3678 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
3679 {
3680         to_vmcb->save.fs = from_vmcb->save.fs;
3681         to_vmcb->save.gs = from_vmcb->save.gs;
3682         to_vmcb->save.tr = from_vmcb->save.tr;
3683         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3684         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3685         to_vmcb->save.star = from_vmcb->save.star;
3686         to_vmcb->save.lstar = from_vmcb->save.lstar;
3687         to_vmcb->save.cstar = from_vmcb->save.cstar;
3688         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3689         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3690         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3691         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
3692 }
3693
3694 static int vmload_interception(struct vcpu_svm *svm)
3695 {
3696         struct vmcb *nested_vmcb;
3697         struct kvm_host_map map;
3698         int ret;
3699
3700         if (nested_svm_check_permissions(svm))
3701                 return 1;
3702
3703         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3704         if (ret) {
3705                 if (ret == -EINVAL)
3706                         kvm_inject_gp(&svm->vcpu, 0);
3707                 return 1;
3708         }
3709
3710         nested_vmcb = map.hva;
3711
3712         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3713
3714         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
3715         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3716
3717         return ret;
3718 }
3719
3720 static int vmsave_interception(struct vcpu_svm *svm)
3721 {
3722         struct vmcb *nested_vmcb;
3723         struct kvm_host_map map;
3724         int ret;
3725
3726         if (nested_svm_check_permissions(svm))
3727                 return 1;
3728
3729         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3730         if (ret) {
3731                 if (ret == -EINVAL)
3732                         kvm_inject_gp(&svm->vcpu, 0);
3733                 return 1;
3734         }
3735
3736         nested_vmcb = map.hva;
3737
3738         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3739
3740         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
3741         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3742
3743         return ret;
3744 }
3745
3746 static int vmrun_interception(struct vcpu_svm *svm)
3747 {
3748         if (nested_svm_check_permissions(svm))
3749                 return 1;
3750
3751         return nested_svm_vmrun(svm);
3752 }
3753
3754 static int stgi_interception(struct vcpu_svm *svm)
3755 {
3756         int ret;
3757
3758         if (nested_svm_check_permissions(svm))
3759                 return 1;
3760
3761         /*
3762          * If VGIF is enabled, the STGI intercept is only added to
3763          * detect the opening of the SMI/NMI window; remove it now.
3764          */
3765         if (vgif_enabled(svm))
3766                 clr_intercept(svm, INTERCEPT_STGI);
3767
3768         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3769         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3770
3771         enable_gif(svm);
3772
3773         return ret;
3774 }
3775
3776 static int clgi_interception(struct vcpu_svm *svm)
3777 {
3778         int ret;
3779
3780         if (nested_svm_check_permissions(svm))
3781                 return 1;
3782
3783         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3784
3785         disable_gif(svm);
3786
3787         /* After a CLGI no interrupts should be delivered */
3788         if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3789                 svm_clear_vintr(svm);
3790                 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3791                 mark_dirty(svm->vmcb, VMCB_INTR);
3792         }
3793
3794         return ret;
3795 }
3796
3797 static int invlpga_interception(struct vcpu_svm *svm)
3798 {
3799         struct kvm_vcpu *vcpu = &svm->vcpu;
3800
3801         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
3802                           kvm_rax_read(&svm->vcpu));
3803
3804         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3805         kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
3806
3807         return kvm_skip_emulated_instruction(&svm->vcpu);
3808 }
3809
3810 static int skinit_interception(struct vcpu_svm *svm)
3811 {
3812         trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
3813
3814         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3815         return 1;
3816 }
3817
3818 static int wbinvd_interception(struct vcpu_svm *svm)
3819 {
3820         return kvm_emulate_wbinvd(&svm->vcpu);
3821 }
3822
3823 static int xsetbv_interception(struct vcpu_svm *svm)
3824 {
3825         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3826         u32 index = kvm_rcx_read(&svm->vcpu);
3827
3828         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3829                 return kvm_skip_emulated_instruction(&svm->vcpu);
3830         }
3831
3832         return 1;
3833 }
3834
3835 static int task_switch_interception(struct vcpu_svm *svm)
3836 {
3837         u16 tss_selector;
3838         int reason;
3839         int int_type = svm->vmcb->control.exit_int_info &
3840                 SVM_EXITINTINFO_TYPE_MASK;
3841         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
3842         uint32_t type =
3843                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3844         uint32_t idt_v =
3845                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
3846         bool has_error_code = false;
3847         u32 error_code = 0;
3848
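        /*
         * For task switch intercepts, exit_info_1 carries the target TSS
         * selector and exit_info_2 encodes the switch reason and, if
         * present, the error code.
         */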
3849         tss_selector = (u16)svm->vmcb->control.exit_info_1;
3850
3851         if (svm->vmcb->control.exit_info_2 &
3852             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
3853                 reason = TASK_SWITCH_IRET;
3854         else if (svm->vmcb->control.exit_info_2 &
3855                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3856                 reason = TASK_SWITCH_JMP;
3857         else if (idt_v)
3858                 reason = TASK_SWITCH_GATE;
3859         else
3860                 reason = TASK_SWITCH_CALL;
3861
3862         if (reason == TASK_SWITCH_GATE) {
3863                 switch (type) {
3864                 case SVM_EXITINTINFO_TYPE_NMI:
3865                         svm->vcpu.arch.nmi_injected = false;
3866                         break;
3867                 case SVM_EXITINTINFO_TYPE_EXEPT:
3868                         if (svm->vmcb->control.exit_info_2 &
3869                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3870                                 has_error_code = true;
3871                                 error_code =
3872                                         (u32)svm->vmcb->control.exit_info_2;
3873                         }
3874                         kvm_clear_exception_queue(&svm->vcpu);
3875                         break;
3876                 case SVM_EXITINTINFO_TYPE_INTR:
3877                         kvm_clear_interrupt_queue(&svm->vcpu);
3878                         break;
3879                 default:
3880                         break;
3881                 }
3882         }
3883
3884         if (reason != TASK_SWITCH_GATE ||
3885             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3886             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
3887              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
3888                 if (skip_emulated_instruction(&svm->vcpu) == EMULATE_USER_EXIT)
3889                         return 0;
3890         }
3891
3892         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3893                 int_vec = -1;
3894
3895         if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
3896                                 has_error_code, error_code) == EMULATE_FAIL)
3897                 goto fail;
3898
3899         return 1;
3900
3901 fail:
3902         svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3903         svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3904         svm->vcpu.run->internal.ndata = 0;
3905         return 0;
3906 }
3907
3908 static int cpuid_interception(struct vcpu_svm *svm)
3909 {
3910         return kvm_emulate_cpuid(&svm->vcpu);
3911 }
3912
3913 static int iret_interception(struct vcpu_svm *svm)
3914 {
3915         ++svm->vcpu.stat.nmi_window_exits;
3916         clr_intercept(svm, INTERCEPT_IRET);
3917         svm->vcpu.arch.hflags |= HF_IRET_MASK;
3918         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
3919         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3920         return 1;
3921 }
3922
3923 static int invlpg_interception(struct vcpu_svm *svm)
3924 {
3925         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3926                 return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3927
3928         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3929         return kvm_skip_emulated_instruction(&svm->vcpu);
3930 }
3931
3932 static int emulate_on_interception(struct vcpu_svm *svm)
3933 {
3934         return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3935 }
3936
3937 static int rsm_interception(struct vcpu_svm *svm)
3938 {
3939         return kvm_emulate_instruction_from_buffer(&svm->vcpu,
3940                                         rsm_ins_bytes, 2) == EMULATE_DONE;
3941 }
3942
3943 static int rdpmc_interception(struct vcpu_svm *svm)
3944 {
3945         int err;
3946
3947         if (!nrips)
3948                 return emulate_on_interception(svm);
3949
3950         err = kvm_rdpmc(&svm->vcpu);
3951         return kvm_complete_insn_gp(&svm->vcpu, err);
3952 }
3953
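/*
 * Check whether L1 intercepts selective CR0 writes and whether this write
 * changes bits outside SVM_CR0_SELECTIVE_MASK; if so, the write is
 * reflected to L1 as an SVM_EXIT_CR0_SEL_WRITE #vmexit.
 */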
3954 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3955                                             unsigned long val)
3956 {
3957         unsigned long cr0 = svm->vcpu.arch.cr0;
3958         bool ret = false;
3959         u64 intercept;
3960
3961         intercept = svm->nested.intercept;
3962
3963         if (!is_guest_mode(&svm->vcpu) ||
3964             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3965                 return false;
3966
3967         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3968         val &= ~SVM_CR0_SELECTIVE_MASK;
3969
3970         if (cr0 ^ val) {
3971                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3972                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3973         }
3974
3975         return ret;
3976 }
3977
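/*
 * With decode assists, bit 63 of exit_info_1 signals that the decoded
 * MOV-CR information (including the GPR number in the low bits) is valid.
 */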
3978 #define CR_VALID (1ULL << 63)
3979
3980 static int cr_interception(struct vcpu_svm *svm)
3981 {
3982         int reg, cr;
3983         unsigned long val;
3984         int err;
3985
3986         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3987                 return emulate_on_interception(svm);
3988
3989         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3990                 return emulate_on_interception(svm);
3991
3992         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3993         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3994                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3995         else
3996                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
3997
3998         err = 0;
3999         if (cr >= 16) { /* mov to cr */
4000                 cr -= 16;
4001                 val = kvm_register_read(&svm->vcpu, reg);
4002                 switch (cr) {
4003                 case 0:
4004                         if (!check_selective_cr0_intercepted(svm, val))
4005                                 err = kvm_set_cr0(&svm->vcpu, val);
4006                         else
4007                                 return 1;
4008
4009                         break;
4010                 case 3:
4011                         err = kvm_set_cr3(&svm->vcpu, val);
4012                         break;
4013                 case 4:
4014                         err = kvm_set_cr4(&svm->vcpu, val);
4015                         break;
4016                 case 8:
4017                         err = kvm_set_cr8(&svm->vcpu, val);
4018                         break;
4019                 default:
4020                         WARN(1, "unhandled write to CR%d", cr);
4021                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4022                         return 1;
4023                 }
4024         } else { /* mov from cr */
4025                 switch (cr) {
4026                 case 0:
4027                         val = kvm_read_cr0(&svm->vcpu);
4028                         break;
4029                 case 2:
4030                         val = svm->vcpu.arch.cr2;
4031                         break;
4032                 case 3:
4033                         val = kvm_read_cr3(&svm->vcpu);
4034                         break;
4035                 case 4:
4036                         val = kvm_read_cr4(&svm->vcpu);
4037                         break;
4038                 case 8:
4039                         val = kvm_get_cr8(&svm->vcpu);
4040                         break;
4041                 default:
4042                         WARN(1, "unhandled read from CR%d", cr);
4043                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4044                         return 1;
4045                 }
4046                 kvm_register_write(&svm->vcpu, reg, val);
4047         }
4048         return kvm_complete_insn_gp(&svm->vcpu, err);
4049 }
4050
4051 static int dr_interception(struct vcpu_svm *svm)
4052 {
4053         int reg, dr;
4054         unsigned long val;
4055
4056         if (svm->vcpu.guest_debug == 0) {
4057                 /*
4058                  * No more DR vmexits; force a reload of the debug registers
4059                  * and reenter on this instruction.  The next vmexit will
4060                  * retrieve the full state of the debug registers.
4061                  */
4062                 clr_dr_intercepts(svm);
4063                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
4064                 return 1;
4065         }
4066
4067         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
4068                 return emulate_on_interception(svm);
4069
4070         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
4071         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
4072
4073         if (dr >= 16) { /* mov to DRn */
4074                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
4075                         return 1;
4076                 val = kvm_register_read(&svm->vcpu, reg);
4077                 kvm_set_dr(&svm->vcpu, dr - 16, val);
4078         } else {
4079                 if (!kvm_require_dr(&svm->vcpu, dr))
4080                         return 1;
4081                 kvm_get_dr(&svm->vcpu, dr, &val);
4082                 kvm_register_write(&svm->vcpu, reg, val);
4083         }
4084
4085         return kvm_skip_emulated_instruction(&svm->vcpu);
4086 }
4087
4088 static int cr8_write_interception(struct vcpu_svm *svm)
4089 {
4090         struct kvm_run *kvm_run = svm->vcpu.run;
4091         int r;
4092
4093         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
4094         /* instruction emulation calls kvm_set_cr8() */
4095         r = cr_interception(svm);
4096         if (lapic_in_kernel(&svm->vcpu))
4097                 return r;
4098         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
4099                 return r;
4100         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
4101         return 0;
4102 }
4103
4104 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
4105 {
4106         msr->data = 0;
4107
4108         switch (msr->index) {
4109         case MSR_F10H_DECFG:
4110                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
4111                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
4112                 break;
4113         default:
4114                 return 1;
4115         }
4116
4117         return 0;
4118 }
4119
4120 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4121 {
4122         struct vcpu_svm *svm = to_svm(vcpu);
4123
4124         switch (msr_info->index) {
4125         case MSR_STAR:
4126                 msr_info->data = svm->vmcb->save.star;
4127                 break;
4128 #ifdef CONFIG_X86_64
4129         case MSR_LSTAR:
4130                 msr_info->data = svm->vmcb->save.lstar;
4131                 break;
4132         case MSR_CSTAR:
4133                 msr_info->data = svm->vmcb->save.cstar;
4134                 break;
4135         case MSR_KERNEL_GS_BASE:
4136                 msr_info->data = svm->vmcb->save.kernel_gs_base;
4137                 break;
4138         case MSR_SYSCALL_MASK:
4139                 msr_info->data = svm->vmcb->save.sfmask;
4140                 break;
4141 #endif
4142         case MSR_IA32_SYSENTER_CS:
4143                 msr_info->data = svm->vmcb->save.sysenter_cs;
4144                 break;
4145         case MSR_IA32_SYSENTER_EIP:
4146                 msr_info->data = svm->sysenter_eip;
4147                 break;
4148         case MSR_IA32_SYSENTER_ESP:
4149                 msr_info->data = svm->sysenter_esp;
4150                 break;
4151         case MSR_TSC_AUX:
4152                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4153                         return 1;
4154                 msr_info->data = svm->tsc_aux;
4155                 break;
4156         /*
4157          * Nobody will change the following 5 values in the VMCB so we can
4158          * safely return them on rdmsr. They will always be 0 until LBRV is
4159          * implemented.
4160          */
4161         case MSR_IA32_DEBUGCTLMSR:
4162                 msr_info->data = svm->vmcb->save.dbgctl;
4163                 break;
4164         case MSR_IA32_LASTBRANCHFROMIP:
4165                 msr_info->data = svm->vmcb->save.br_from;
4166                 break;
4167         case MSR_IA32_LASTBRANCHTOIP:
4168                 msr_info->data = svm->vmcb->save.br_to;
4169                 break;
4170         case MSR_IA32_LASTINTFROMIP:
4171                 msr_info->data = svm->vmcb->save.last_excp_from;
4172                 break;
4173         case MSR_IA32_LASTINTTOIP:
4174                 msr_info->data = svm->vmcb->save.last_excp_to;
4175                 break;
4176         case MSR_VM_HSAVE_PA:
4177                 msr_info->data = svm->nested.hsave_msr;
4178                 break;
4179         case MSR_VM_CR:
4180                 msr_info->data = svm->nested.vm_cr_msr;
4181                 break;
4182         case MSR_IA32_SPEC_CTRL:
4183                 if (!msr_info->host_initiated &&
4184                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4185                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4186                         return 1;
4187
4188                 msr_info->data = svm->spec_ctrl;
4189                 break;
4190         case MSR_AMD64_VIRT_SPEC_CTRL:
4191                 if (!msr_info->host_initiated &&
4192                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4193                         return 1;
4194
4195                 msr_info->data = svm->virt_spec_ctrl;
4196                 break;
4197         case MSR_F15H_IC_CFG: {
4198
4199                 int family, model;
4200
4201                 family = guest_cpuid_family(vcpu);
4202                 model  = guest_cpuid_model(vcpu);
4203
4204                 if (family < 0 || model < 0)
4205                         return kvm_get_msr_common(vcpu, msr_info);
4206
4207                 msr_info->data = 0;
4208
4209                 if (family == 0x15 &&
4210                     (model >= 0x2 && model < 0x20))
4211                         msr_info->data = 0x1E;
4212                 }
4213                 break;
4214         case MSR_F10H_DECFG:
4215                 msr_info->data = svm->msr_decfg;
4216                 break;
4217         default:
4218                 return kvm_get_msr_common(vcpu, msr_info);
4219         }
4220         return 0;
4221 }
4222
4223 static int rdmsr_interception(struct vcpu_svm *svm)
4224 {
4225         return kvm_emulate_rdmsr(&svm->vcpu);
4226 }
4227
4228 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
4229 {
4230         struct vcpu_svm *svm = to_svm(vcpu);
4231         int svm_dis, chg_mask;
4232
4233         if (data & ~SVM_VM_CR_VALID_MASK)
4234                 return 1;
4235
4236         chg_mask = SVM_VM_CR_VALID_MASK;
4237
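        /*
         * Once SVMDIS is set in VM_CR, neither the lock bit nor the
         * disable bit can be changed any more.
         */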
4238         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
4239                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
4240
4241         svm->nested.vm_cr_msr &= ~chg_mask;
4242         svm->nested.vm_cr_msr |= (data & chg_mask);
4243
4244         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
4245
4246         /* check for svm_disable while efer.svme is set */
4247         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
4248                 return 1;
4249
4250         return 0;
4251 }
4252
4253 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4254 {
4255         struct vcpu_svm *svm = to_svm(vcpu);
4256
4257         u32 ecx = msr->index;
4258         u64 data = msr->data;
4259         switch (ecx) {
4260         case MSR_IA32_CR_PAT:
4261                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4262                         return 1;
4263                 vcpu->arch.pat = data;
4264                 svm->vmcb->save.g_pat = data;
4265                 mark_dirty(svm->vmcb, VMCB_NPT);
4266                 break;
4267         case MSR_IA32_SPEC_CTRL:
4268                 if (!msr->host_initiated &&
4269                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4270                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4271                         return 1;
4272
4273                 /* The STIBP bit doesn't fault even if it's not advertised */
4274                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4275                         return 1;
4276
4277                 svm->spec_ctrl = data;
4278
4279                 if (!data)
4280                         break;
4281
4282                 /*
4283                  * For non-nested:
4284                  * When it's written (to non-zero) for the first time, pass
4285                  * it through.
4286                  *
4287                  * For nested:
4288                  * The handling of the MSR bitmap for L2 guests is done in
4289                  * nested_svm_vmrun_msrpm.
4290                  * We update the L1 MSR bit as well since it will end up
4291                  * touching the MSR anyway now.
4292                  */
4293                 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
4294                 break;
4295         case MSR_IA32_PRED_CMD:
4296                 if (!msr->host_initiated &&
4297                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
4298                         return 1;
4299
4300                 if (data & ~PRED_CMD_IBPB)
4301                         return 1;
4302
4303                 if (!data)
4304                         break;
4305
4306                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4307                 if (is_guest_mode(vcpu))
4308                         break;
4309                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
4310                 break;
4311         case MSR_AMD64_VIRT_SPEC_CTRL:
4312                 if (!msr->host_initiated &&
4313                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4314                         return 1;
4315
4316                 if (data & ~SPEC_CTRL_SSBD)
4317                         return 1;
4318
4319                 svm->virt_spec_ctrl = data;
4320                 break;
4321         case MSR_STAR:
4322                 svm->vmcb->save.star = data;
4323                 break;
4324 #ifdef CONFIG_X86_64
4325         case MSR_LSTAR:
4326                 svm->vmcb->save.lstar = data;
4327                 break;
4328         case MSR_CSTAR:
4329                 svm->vmcb->save.cstar = data;
4330                 break;
4331         case MSR_KERNEL_GS_BASE:
4332                 svm->vmcb->save.kernel_gs_base = data;
4333                 break;
4334         case MSR_SYSCALL_MASK:
4335                 svm->vmcb->save.sfmask = data;
4336                 break;
4337 #endif
4338         case MSR_IA32_SYSENTER_CS:
4339                 svm->vmcb->save.sysenter_cs = data;
4340                 break;
4341         case MSR_IA32_SYSENTER_EIP:
4342                 svm->sysenter_eip = data;
4343                 svm->vmcb->save.sysenter_eip = data;
4344                 break;
4345         case MSR_IA32_SYSENTER_ESP:
4346                 svm->sysenter_esp = data;
4347                 svm->vmcb->save.sysenter_esp = data;
4348                 break;
4349         case MSR_TSC_AUX:
4350                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4351                         return 1;
4352
4353                 /*
4354                  * This is rare, so we update the MSR here instead of using
4355                  * direct_access_msrs.  Doing that would require a rdmsr in
4356                  * svm_vcpu_put.
4357                  */
4358                 svm->tsc_aux = data;
4359                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4360                 break;
4361         case MSR_IA32_DEBUGCTLMSR:
4362                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
4363                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4364                                     __func__, data);
4365                         break;
4366                 }
4367                 if (data & DEBUGCTL_RESERVED_BITS)
4368                         return 1;
4369
4370                 svm->vmcb->save.dbgctl = data;
4371                 mark_dirty(svm->vmcb, VMCB_LBR);
4372                 if (data & (1ULL<<0))
4373                         svm_enable_lbrv(svm);
4374                 else
4375                         svm_disable_lbrv(svm);
4376                 break;
4377         case MSR_VM_HSAVE_PA:
4378                 svm->nested.hsave_msr = data;
4379                 break;
4380         case MSR_VM_CR:
4381                 return svm_set_vm_cr(vcpu, data);
4382         case MSR_VM_IGNNE:
4383                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
4384                 break;
4385         case MSR_F10H_DECFG: {
4386                 struct kvm_msr_entry msr_entry;
4387
4388                 msr_entry.index = msr->index;
4389                 if (svm_get_msr_feature(&msr_entry))
4390                         return 1;
4391
4392                 /* Check the supported bits */
4393                 if (data & ~msr_entry.data)
4394                         return 1;
4395
4396                 /* Don't allow the guest to change a bit, #GP */
4397                 if (!msr->host_initiated && (data ^ msr_entry.data))
4398                         return 1;
4399
4400                 svm->msr_decfg = data;
4401                 break;
4402         }
4403         case MSR_IA32_APICBASE:
4404                 if (kvm_vcpu_apicv_active(vcpu))
4405                         avic_update_vapic_bar(to_svm(vcpu), data);
4406                 /* Fall through */
4407         default:
4408                 return kvm_set_msr_common(vcpu, msr);
4409         }
4410         return 0;
4411 }
4412
4413 static int wrmsr_interception(struct vcpu_svm *svm)
4414 {
4415         return kvm_emulate_wrmsr(&svm->vcpu);
4416 }
4417
4418 static int msr_interception(struct vcpu_svm *svm)
4419 {
4420         if (svm->vmcb->control.exit_info_1)
4421                 return wrmsr_interception(svm);
4422         else
4423                 return rdmsr_interception(svm);
4424 }
4425
4426 static int interrupt_window_interception(struct vcpu_svm *svm)
4427 {
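        /*
         * The interrupt window has opened: drop the virtual interrupt used
         * to request this exit and re-evaluate pending events for injection.
         */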
4428         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4429         svm_clear_vintr(svm);
4430         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
4431         mark_dirty(svm->vmcb, VMCB_INTR);
4432         ++svm->vcpu.stat.irq_window_exits;
4433         return 1;
4434 }
4435
4436 static int pause_interception(struct vcpu_svm *svm)
4437 {
4438         struct kvm_vcpu *vcpu = &svm->vcpu;
4439         bool in_kernel = (svm_get_cpl(vcpu) == 0);
4440
4441         if (pause_filter_thresh)
4442                 grow_ple_window(vcpu);
4443
4444         kvm_vcpu_on_spin(vcpu, in_kernel);
4445         return 1;
4446 }
4447
4448 static int nop_interception(struct vcpu_svm *svm)
4449 {
4450         return kvm_skip_emulated_instruction(&(svm->vcpu));
4451 }
4452
4453 static int monitor_interception(struct vcpu_svm *svm)
4454 {
4455         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4456         return nop_interception(svm);
4457 }
4458
4459 static int mwait_interception(struct vcpu_svm *svm)
4460 {
4461         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4462         return nop_interception(svm);
4463 }
4464
4465 enum avic_ipi_failure_cause {
4466         AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4467         AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4468         AVIC_IPI_FAILURE_INVALID_TARGET,
4469         AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4470 };
4471
4472 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4473 {
4474         u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4475         u32 icrl = svm->vmcb->control.exit_info_1;
4476         u32 id = svm->vmcb->control.exit_info_2 >> 32;
4477         u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
4478         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4479
4480         trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4481
4482         switch (id) {
4483         case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4484                 /*
4485                  * AVIC hardware handles the generation of
4486                  * IPIs when the specified Message Type is Fixed
4487                  * (also known as fixed delivery mode) and
4488                  * the Trigger Mode is edge-triggered. The hardware
4489                  * also supports self and broadcast delivery modes
4490                  * specified via the Destination Shorthand (DSH)
4491                  * field of the ICRL. Logical and physical APIC ID
4492                  * formats are supported. All other IPI types cause
4493                  * a #VMEXIT, which needs to be emulated.
4494                  */
4495                 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4496                 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4497                 break;
4498         case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4499                 int i;
4500                 struct kvm_vcpu *vcpu;
4501                 struct kvm *kvm = svm->vcpu.kvm;
4502                 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4503
4504                 /*
4505                  * At this point, we expect that the AVIC HW has already
4506                  * set the appropriate IRR bits on the valid target
4507                  * vcpus. So, we just need to kick the appropriate vcpu.
4508                  */
4509                 kvm_for_each_vcpu(i, vcpu, kvm) {
4510                         bool m = kvm_apic_match_dest(vcpu, apic,
4511                                                      icrl & KVM_APIC_SHORT_MASK,
4512                                                      GET_APIC_DEST_FIELD(icrh),
4513                                                      icrl & KVM_APIC_DEST_MASK);
4514
4515                         if (m && !avic_vcpu_is_running(vcpu))
4516                                 kvm_vcpu_wake_up(vcpu);
4517                 }
4518                 break;
4519         }
4520         case AVIC_IPI_FAILURE_INVALID_TARGET:
4521                 WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
4522                           index, svm->vcpu.vcpu_id, icrh, icrl);
4523                 break;
4524         case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4525                 WARN_ONCE(1, "Invalid backing page\n");
4526                 break;
4527         default:
4528                 pr_err("Unknown IPI interception\n");
4529         }
4530
4531         return 1;
4532 }
4533
4534 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4535 {
4536         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
4537         int index;
4538         u32 *logical_apic_id_table;
4539         int dlid = GET_APIC_LOGICAL_ID(ldr);
4540
4541         if (!dlid)
4542                 return NULL;
4543
4544         if (flat) { /* flat */
4545                 index = ffs(dlid) - 1;
4546                 if (index > 7)
4547                         return NULL;
4548         } else { /* cluster */
4549                 int cluster = (dlid & 0xf0) >> 4;
4550                 int apic = ffs(dlid & 0x0f) - 1;
4551
4552                 if ((apic < 0) || (apic > 7) ||
4553                     (cluster >= 0xf))
4554                         return NULL;
4555                 index = (cluster << 2) + apic;
4556         }
4557
4558         logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
4559
4560         return &logical_apic_id_table[index];
4561 }
4562
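/*
 * Point the logical APIC ID table entry selected by the LDR value at this
 * vCPU's guest physical APIC ID and mark the entry valid.
 */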
4563 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
4564 {
4565         bool flat;
4566         u32 *entry, new_entry;
4567
4568         flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4569         entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4570         if (!entry)
4571                 return -EINVAL;
4572
4573         new_entry = READ_ONCE(*entry);
4574         new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4575         new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4576         new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4577         WRITE_ONCE(*entry, new_entry);
4578
4579         return 0;
4580 }
4581
4582 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
4583 {
4584         struct vcpu_svm *svm = to_svm(vcpu);
4585         bool flat = svm->dfr_reg == APIC_DFR_FLAT;
4586         u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
4587
4588         if (entry)
4589                 clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
4590 }
4591
4592 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4593 {
4594         int ret = 0;
4595         struct vcpu_svm *svm = to_svm(vcpu);
4596         u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4597
4598         if (ldr == svm->ldr_reg)
4599                 return 0;
4600
4601         avic_invalidate_logical_id_entry(vcpu);
4602
4603         if (ldr)
4604                 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
4605
4606         if (!ret)
4607                 svm->ldr_reg = ldr;
4608
4609         return ret;
4610 }
4611
4612 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4613 {
4614         u64 *old, *new;
4615         struct vcpu_svm *svm = to_svm(vcpu);
4616         u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4617         u32 id = (apic_id_reg >> 24) & 0xff;
4618
4619         if (vcpu->vcpu_id == id)
4620                 return 0;
4621
4622         old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4623         new = avic_get_physical_id_entry(vcpu, id);
4624         if (!new || !old)
4625                 return 1;
4626
4627         /* We need to move the physical_id_entry to the new offset */
4628         *new = *old;
4629         *old = 0ULL;
4630         to_svm(vcpu)->avic_physical_id_cache = new;
4631
4632         /*
4633          * Also update the guest physical APIC ID in the logical
4634          * APIC ID table entry if the LDR has already been set up.
4635          */
4636         if (svm->ldr_reg)
4637                 avic_handle_ldr_update(vcpu);
4638
4639         return 0;
4640 }
4641
4642 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4643 {
4644         struct vcpu_svm *svm = to_svm(vcpu);
4645         u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4646
4647         if (svm->dfr_reg == dfr)
4648                 return;
4649
4650         avic_invalidate_logical_id_entry(vcpu);
4651         svm->dfr_reg = dfr;
4652 }
4653
4654 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4655 {
4656         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4657         u32 offset = svm->vmcb->control.exit_info_1 &
4658                                 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4659
4660         switch (offset) {
4661         case APIC_ID:
4662                 if (avic_handle_apic_id_update(&svm->vcpu))
4663                         return 0;
4664                 break;
4665         case APIC_LDR:
4666                 if (avic_handle_ldr_update(&svm->vcpu))
4667                         return 0;
4668                 break;
4669         case APIC_DFR:
4670                 avic_handle_dfr_update(&svm->vcpu);
4671                 break;
4672         default:
4673                 break;
4674         }
4675
4676         kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4677
4678         return 1;
4679 }
4680
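     /*
      * Unaccelerated APIC register accesses to these offsets are handled as
      * write traps; accesses to all other offsets are handled as faults and
      * emulated.
      */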
4681 static bool is_avic_unaccelerated_access_trap(u32 offset)
4682 {
4683         bool ret = false;
4684
4685         switch (offset) {
4686         case APIC_ID:
4687         case APIC_EOI:
4688         case APIC_RRR:
4689         case APIC_LDR:
4690         case APIC_DFR:
4691         case APIC_SPIV:
4692         case APIC_ESR:
4693         case APIC_ICR:
4694         case APIC_LVTT:
4695         case APIC_LVTTHMR:
4696         case APIC_LVTPC:
4697         case APIC_LVT0:
4698         case APIC_LVT1:
4699         case APIC_LVTERR:
4700         case APIC_TMICT:
4701         case APIC_TDCR:
4702                 ret = true;
4703                 break;
4704         default:
4705                 break;
4706         }
4707         return ret;
4708 }
4709
4710 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4711 {
4712         int ret = 0;
4713         u32 offset = svm->vmcb->control.exit_info_1 &
4714                      AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4715         u32 vector = svm->vmcb->control.exit_info_2 &
4716                      AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4717         bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4718                      AVIC_UNACCEL_ACCESS_WRITE_MASK;
4719         bool trap = is_avic_unaccelerated_access_trap(offset);
4720
4721         trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4722                                             trap, write, vector);
4723         if (trap) {
4724                 /* Handling Trap */
4725                 WARN_ONCE(!write, "svm: Handling trap read.\n");
4726                 ret = avic_unaccel_trap_write(svm);
4727         } else {
4728                 /* Handling Fault */
4729                 ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4730         }
4731
4732         return ret;
4733 }
4734
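     /* Dispatch table mapping SVM exit codes to their exit handlers. */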
4735 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4736         [SVM_EXIT_READ_CR0]                     = cr_interception,
4737         [SVM_EXIT_READ_CR3]                     = cr_interception,
4738         [SVM_EXIT_READ_CR4]                     = cr_interception,
4739         [SVM_EXIT_READ_CR8]                     = cr_interception,
4740         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
4741         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
4742         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
4743         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
4744         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
4745         [SVM_EXIT_READ_DR0]                     = dr_interception,
4746         [SVM_EXIT_READ_DR1]                     = dr_interception,
4747         [SVM_EXIT_READ_DR2]                     = dr_interception,
4748         [SVM_EXIT_READ_DR3]                     = dr_interception,
4749         [SVM_EXIT_READ_DR4]                     = dr_interception,
4750         [SVM_EXIT_READ_DR5]                     = dr_interception,
4751         [SVM_EXIT_READ_DR6]                     = dr_interception,
4752         [SVM_EXIT_READ_DR7]                     = dr_interception,
4753         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
4754         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
4755         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
4756         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
4757         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
4758         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
4759         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
4760         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
4761         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
4762         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
4763         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
4764         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
4765         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
4766         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
4767         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
4768         [SVM_EXIT_INTR]                         = intr_interception,
4769         [SVM_EXIT_NMI]                          = nmi_interception,
4770         [SVM_EXIT_SMI]                          = nop_on_interception,
4771         [SVM_EXIT_INIT]                         = nop_on_interception,
4772         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
4773         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
4774         [SVM_EXIT_CPUID]                        = cpuid_interception,
4775         [SVM_EXIT_IRET]                         = iret_interception,
4776         [SVM_EXIT_INVD]                         = emulate_on_interception,
4777         [SVM_EXIT_PAUSE]                        = pause_interception,
4778         [SVM_EXIT_HLT]                          = halt_interception,
4779         [SVM_EXIT_INVLPG]                       = invlpg_interception,
4780         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
4781         [SVM_EXIT_IOIO]                         = io_interception,
4782         [SVM_EXIT_MSR]                          = msr_interception,
4783         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
4784         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
4785         [SVM_EXIT_VMRUN]                        = vmrun_interception,
4786         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
4787         [SVM_EXIT_VMLOAD]                       = vmload_interception,
4788         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
4789         [SVM_EXIT_STGI]                         = stgi_interception,
4790         [SVM_EXIT_CLGI]                         = clgi_interception,
4791         [SVM_EXIT_SKINIT]                       = skinit_interception,
4792         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
4793         [SVM_EXIT_MONITOR]                      = monitor_interception,
4794         [SVM_EXIT_MWAIT]                        = mwait_interception,
4795         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
4796         [SVM_EXIT_NPF]                          = npf_interception,
4797         [SVM_EXIT_RSM]                          = rsm_interception,
4798         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
4799         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
4800 };
4801
4802 static void dump_vmcb(struct kvm_vcpu *vcpu)
4803 {
4804         struct vcpu_svm *svm = to_svm(vcpu);
4805         struct vmcb_control_area *control = &svm->vmcb->control;
4806         struct vmcb_save_area *save = &svm->vmcb->save;
4807
4808         if (!dump_invalid_vmcb) {
4809                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
4810                 return;
4811         }
4812
4813         pr_err("VMCB Control Area:\n");
4814         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4815         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4816         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4817         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4818         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4819         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4820         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4821         pr_err("%-20s%d\n", "pause filter threshold:",
4822                control->pause_filter_thresh);
4823         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4824         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4825         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4826         pr_err("%-20s%d\n", "asid:", control->asid);
4827         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4828         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4829         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4830         pr_err("%-20s%08x\n", "int_state:", control->int_state);
4831         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4832         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4833         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4834         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4835         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4836         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4837         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
4838         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
4839         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4840         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4841         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
4842         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
4843         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4844         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4845         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
4846         pr_err("VMCB State Save Area:\n");
4847         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4848                "es:",
4849                save->es.selector, save->es.attrib,
4850                save->es.limit, save->es.base);
4851         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4852                "cs:",
4853                save->cs.selector, save->cs.attrib,
4854                save->cs.limit, save->cs.base);
4855         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4856                "ss:",
4857                save->ss.selector, save->ss.attrib,
4858                save->ss.limit, save->ss.base);
4859         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4860                "ds:",
4861                save->ds.selector, save->ds.attrib,
4862                save->ds.limit, save->ds.base);
4863         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4864                "fs:",
4865                save->fs.selector, save->fs.attrib,
4866                save->fs.limit, save->fs.base);
4867         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4868                "gs:",
4869                save->gs.selector, save->gs.attrib,
4870                save->gs.limit, save->gs.base);
4871         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4872                "gdtr:",
4873                save->gdtr.selector, save->gdtr.attrib,
4874                save->gdtr.limit, save->gdtr.base);
4875         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4876                "ldtr:",
4877                save->ldtr.selector, save->ldtr.attrib,
4878                save->ldtr.limit, save->ldtr.base);
4879         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4880                "idtr:",
4881                save->idtr.selector, save->idtr.attrib,
4882                save->idtr.limit, save->idtr.base);
4883         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4884                "tr:",
4885                save->tr.selector, save->tr.attrib,
4886                save->tr.limit, save->tr.base);
4887         pr_err("cpl:            %d                efer:         %016llx\n",
4888                 save->cpl, save->efer);
4889         pr_err("%-15s %016llx %-13s %016llx\n",
4890                "cr0:", save->cr0, "cr2:", save->cr2);
4891         pr_err("%-15s %016llx %-13s %016llx\n",
4892                "cr3:", save->cr3, "cr4:", save->cr4);
4893         pr_err("%-15s %016llx %-13s %016llx\n",
4894                "dr6:", save->dr6, "dr7:", save->dr7);
4895         pr_err("%-15s %016llx %-13s %016llx\n",
4896                "rip:", save->rip, "rflags:", save->rflags);
4897         pr_err("%-15s %016llx %-13s %016llx\n",
4898                "rsp:", save->rsp, "rax:", save->rax);
4899         pr_err("%-15s %016llx %-13s %016llx\n",
4900                "star:", save->star, "lstar:", save->lstar);
4901         pr_err("%-15s %016llx %-13s %016llx\n",
4902                "cstar:", save->cstar, "sfmask:", save->sfmask);
4903         pr_err("%-15s %016llx %-13s %016llx\n",
4904                "kernel_gs_base:", save->kernel_gs_base,
4905                "sysenter_cs:", save->sysenter_cs);
4906         pr_err("%-15s %016llx %-13s %016llx\n",
4907                "sysenter_esp:", save->sysenter_esp,
4908                "sysenter_eip:", save->sysenter_eip);
4909         pr_err("%-15s %016llx %-13s %016llx\n",
4910                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4911         pr_err("%-15s %016llx %-13s %016llx\n",
4912                "br_from:", save->br_from, "br_to:", save->br_to);
4913         pr_err("%-15s %016llx %-13s %016llx\n",
4914                "excp_from:", save->last_excp_from,
4915                "excp_to:", save->last_excp_to);
4916 }
4917
4918 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4919 {
4920         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4921
4922         *info1 = control->exit_info_1;
4923         *info2 = control->exit_info_2;
4924 }
4925
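     /*
      * Top-level #VMEXIT handler: sync CR0/CR3 back to the vcpu, let a nested
      * guest handle the exit when required, and otherwise dispatch the exit
      * code to svm_exit_handlers[].
      */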
4926 static int handle_exit(struct kvm_vcpu *vcpu)
4927 {
4928         struct vcpu_svm *svm = to_svm(vcpu);
4929         struct kvm_run *kvm_run = vcpu->run;
4930         u32 exit_code = svm->vmcb->control.exit_code;
4931
4932         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4933
4934         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
4935                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4936         if (npt_enabled)
4937                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
4938
4939         if (unlikely(svm->nested.exit_required)) {
4940                 nested_svm_vmexit(svm);
4941                 svm->nested.exit_required = false;
4942
4943                 return 1;
4944         }
4945
4946         if (is_guest_mode(vcpu)) {
4947                 int vmexit;
4948
4949                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4950                                         svm->vmcb->control.exit_info_1,
4951                                         svm->vmcb->control.exit_info_2,
4952                                         svm->vmcb->control.exit_int_info,
4953                                         svm->vmcb->control.exit_int_info_err,
4954                                         KVM_ISA_SVM);
4955
4956                 vmexit = nested_svm_exit_special(svm);
4957
4958                 if (vmexit == NESTED_EXIT_CONTINUE)
4959                         vmexit = nested_svm_exit_handled(svm);
4960
4961                 if (vmexit == NESTED_EXIT_DONE)
4962                         return 1;
4963         }
4964
4965         svm_complete_interrupts(svm);
4966
4967         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4968                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4969                 kvm_run->fail_entry.hardware_entry_failure_reason
4970                         = svm->vmcb->control.exit_code;
4971                 dump_vmcb(vcpu);
4972                 return 0;
4973         }
4974
4975         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
4976             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
4977             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4978             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
4979                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
4980                        "exit_code 0x%x\n",
4981                        __func__, svm->vmcb->control.exit_int_info,
4982                        exit_code);
4983
4984         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
4985             || !svm_exit_handlers[exit_code]) {
4986                 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
4987                 dump_vmcb(vcpu);
4988                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4989                 vcpu->run->internal.suberror =
4990                         KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
4991                 vcpu->run->internal.ndata = 1;
4992                 vcpu->run->internal.data[0] = exit_code;
4993                 return 0;
4994         }
4995
4996         return svm_exit_handlers[exit_code](svm);
4997 }
4998
4999 static void reload_tss(struct kvm_vcpu *vcpu)
5000 {
5001         int cpu = raw_smp_processor_id();
5002
5003         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5004         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
5005         load_TR_desc();
5006 }
5007
5008 static void pre_sev_run(struct vcpu_svm *svm, int cpu)
5009 {
5010         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5011         int asid = sev_get_asid(svm->vcpu.kvm);
5012
5013         /* Assign the ASID allocated for this SEV guest */
5014         svm->vmcb->control.asid = asid;
5015
5016         /*
5017          * Flush guest TLB:
5018          *
5019          * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
5020          * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
5021          */
5022         if (sd->sev_vmcbs[asid] == svm->vmcb &&
5023             svm->last_cpu == cpu)
5024                 return;
5025
5026         svm->last_cpu = cpu;
5027         sd->sev_vmcbs[asid] = svm->vmcb;
5028         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5029         mark_dirty(svm->vmcb, VMCB_ASID);
5030 }
5031
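     /*
      * Make sure the vCPU runs with a valid ASID: SEV guests use their
      * dedicated ASID via pre_sev_run(), others get a new ASID when the
      * per-CPU ASID generation has changed.
      */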
5032 static void pre_svm_run(struct vcpu_svm *svm)
5033 {
5034         int cpu = raw_smp_processor_id();
5035
5036         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5037
5038         if (sev_guest(svm->vcpu.kvm))
5039                 return pre_sev_run(svm, cpu);
5040
5041         /* FIXME: handle wraparound of asid_generation */
5042         if (svm->asid_generation != sd->asid_generation)
5043                 new_asid(svm, sd);
5044 }
5045
5046 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
5047 {
5048         struct vcpu_svm *svm = to_svm(vcpu);
5049
5050         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
5051         vcpu->arch.hflags |= HF_NMI_MASK;
5052         set_intercept(svm, INTERCEPT_IRET);
5053         ++vcpu->stat.nmi_injections;
5054 }
5055
5056 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
5057 {
5058         struct vmcb_control_area *control;
5059
5060         /* The following fields are ignored when AVIC is enabled */
5061         control = &svm->vmcb->control;
5062         control->int_vector = irq;
5063         control->int_ctl &= ~V_INTR_PRIO_MASK;
5064         control->int_ctl |= V_IRQ_MASK |
5065                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
5066         mark_dirty(svm->vmcb, VMCB_INTR);
5067 }
5068
5069 static void svm_set_irq(struct kvm_vcpu *vcpu)
5070 {
5071         struct vcpu_svm *svm = to_svm(vcpu);
5072
5073         BUG_ON(!(gif_set(svm)));
5074
5075         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
5076         ++vcpu->stat.irq_injections;
5077
5078         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
5079                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
5080 }
5081
5082 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
5083 {
5084         return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
5085 }
5086
5087 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
5088 {
5089         struct vcpu_svm *svm = to_svm(vcpu);
5090
5091         if (svm_nested_virtualize_tpr(vcpu) ||
5092             kvm_vcpu_apicv_active(vcpu))
5093                 return;
5094
5095         clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5096
5097         if (irr == -1)
5098                 return;
5099
5100         if (tpr >= irr)
5101                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5102 }
5103
5104 static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
5105 {
5106         return;
5107 }
5108
5109 static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
5110 {
5111         return avic && irqchip_split(vcpu->kvm);
5112 }
5113
5114 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
5115 {
5116 }
5117
5118 static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
5119 {
5120 }
5121
5122 /* Note: Currently only used by Hyper-V. */
5123 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
5124 {
5125         struct vcpu_svm *svm = to_svm(vcpu);
5126         struct vmcb *vmcb = svm->vmcb;
5127
5128         if (kvm_vcpu_apicv_active(vcpu))
5129                 vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
5130         else
5131                 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
5132         mark_dirty(vmcb, VMCB_AVIC);
5133 }
5134
5135 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
5136 {
5137         return;
5138 }
5139
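     /*
      * Set the vector in the vAPIC IRR and notify the target vCPU: send an
      * AVIC doorbell if it is currently running, otherwise wake it up.
      */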
5140 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
5141 {
5142         kvm_lapic_set_irr(vec, vcpu->arch.apic);
5143         smp_mb__after_atomic();
5144
5145         if (avic_vcpu_is_running(vcpu)) {
5146                 int cpuid = vcpu->cpu;
5147
5148                 if (cpuid != get_cpu())
5149                         wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
5150                 put_cpu();
5151         } else
5152                 kvm_vcpu_wake_up(vcpu);
5153 }
5154
5155 static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
5156 {
5157         return false;
5158 }
5159
5160 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5161 {
5162         unsigned long flags;
5163         struct amd_svm_iommu_ir *cur;
5164
5165         spin_lock_irqsave(&svm->ir_list_lock, flags);
5166         list_for_each_entry(cur, &svm->ir_list, node) {
5167                 if (cur->data != pi->ir_data)
5168                         continue;
5169                 list_del(&cur->node);
5170                 kfree(cur);
5171                 break;
5172         }
5173         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5174 }
5175
5176 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5177 {
5178         int ret = 0;
5179         unsigned long flags;
5180         struct amd_svm_iommu_ir *ir;
5181
5182         /**
5183          * In some cases, the existing irte is updated and re-set,
5184          * so we need to check here if it's already been added
5185          * to the ir_list.
5186          */
5187         if (pi->ir_data && (pi->prev_ga_tag != 0)) {
5188                 struct kvm *kvm = svm->vcpu.kvm;
5189                 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
5190                 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
5191                 struct vcpu_svm *prev_svm;
5192
5193                 if (!prev_vcpu) {
5194                         ret = -EINVAL;
5195                         goto out;
5196                 }
5197
5198                 prev_svm = to_svm(prev_vcpu);
5199                 svm_ir_list_del(prev_svm, pi);
5200         }
5201
5202         /**
5203          * Allocate a new amd_iommu_pi_data, which will be added
5204          * to the per-vcpu ir_list.
5205          */
5206         ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
5207         if (!ir) {
5208                 ret = -ENOMEM;
5209                 goto out;
5210         }
5211         ir->data = pi->ir_data;
5212
5213         spin_lock_irqsave(&svm->ir_list_lock, flags);
5214         list_add(&ir->node, &svm->ir_list);
5215         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5216 out:
5217         return ret;
5218 }
5219
5220 /**
5221  * Note:
5222  * The HW cannot support posting multicast/broadcast
5223  * interrupts to a vCPU. So, we still use legacy interrupt
5224  * remapping for this kind of interrupt.
5225  *
5226  * For lowest-priority interrupts, we only support
5227  * those with a single CPU as the destination, e.g. the user
5228  * configures the interrupts via /proc/irq or uses
5229  * irqbalance to make the interrupts single-CPU.
5230  */
5231 static int
5232 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
5233                  struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
5234 {
5235         struct kvm_lapic_irq irq;
5236         struct kvm_vcpu *vcpu = NULL;
5237
5238         kvm_set_msi_irq(kvm, e, &irq);
5239
5240         if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
5241             !kvm_irq_is_postable(&irq)) {
5242                 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
5243                          __func__, irq.vector);
5244                 return -1;
5245         }
5246
5247         pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
5248                  irq.vector);
5249         *svm = to_svm(vcpu);
5250         vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
5251         vcpu_info->vector = irq.vector;
5252
5253         return 0;
5254 }
5255
5256 /*
5257  * svm_update_pi_irte - set IRTE for Posted-Interrupts
5258  *
5259  * @kvm: kvm
5260  * @host_irq: host irq of the interrupt
5261  * @guest_irq: gsi of the interrupt
5262  * @set: set or unset PI
5263  * returns 0 on success, < 0 on failure
5264  */
5265 static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
5266                               uint32_t guest_irq, bool set)
5267 {
5268         struct kvm_kernel_irq_routing_entry *e;
5269         struct kvm_irq_routing_table *irq_rt;
5270         int idx, ret = -EINVAL;
5271
5272         if (!kvm_arch_has_assigned_device(kvm) ||
5273             !irq_remapping_cap(IRQ_POSTING_CAP))
5274                 return 0;
5275
5276         pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
5277                  __func__, host_irq, guest_irq, set);
5278
5279         idx = srcu_read_lock(&kvm->irq_srcu);
5280         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
5281         WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
5282
5283         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
5284                 struct vcpu_data vcpu_info;
5285                 struct vcpu_svm *svm = NULL;
5286
5287                 if (e->type != KVM_IRQ_ROUTING_MSI)
5288                         continue;
5289
5290                 /**
5291                  * Here, we set up legacy mode in the following cases:
5292                  * 1. When the interrupt cannot be targeted to a specific vcpu.
5293                  * 2. Unsetting posted interrupt.
5294                  * 3. APIC virtualization is disabled for the vcpu.
5295                  * 4. IRQ has an incompatible delivery mode (SMI, INIT, etc.).
5296                  */
5297                 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
5298                     kvm_vcpu_apicv_active(&svm->vcpu)) {
5299                         struct amd_iommu_pi_data pi;
5300
5301                         /* Try to enable guest_mode in IRTE */
5302                         pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
5303                                             AVIC_HPA_MASK);
5304                         pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
5305                                                      svm->vcpu.vcpu_id);
5306                         pi.is_guest_mode = true;
5307                         pi.vcpu_data = &vcpu_info;
5308                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5309
5310                         /**
5311                          * Here, we have successfully set up vcpu affinity in
5312                          * IOMMU guest mode. Now, we need to store the posted
5313                          * interrupt information in a per-vcpu ir_list so that
5314                          * we can refer to it directly when we update vcpu
5315                          * scheduling information in IOMMU irte.
5316                          */
5317                         if (!ret && pi.is_guest_mode)
5318                                 svm_ir_list_add(svm, &pi);
5319                 } else {
5320                         /* Use legacy mode in IRTE */
5321                         struct amd_iommu_pi_data pi;
5322
5323                         /**
5324                          * Here, pi is used to:
5325                          * - Tell the IOMMU to use legacy mode for this interrupt.
5326                          * - Retrieve the ga_tag of the prior interrupt remapping data.
5327                          */
5328                         pi.is_guest_mode = false;
5329                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5330
5331                         /**
5332                          * Check if the posted interrupt was previously
5333                          * set up in guest_mode by checking if the ga_tag
5334                          * was cached. If so, we need to clean up the per-vcpu
5335                          * ir_list.
5336                          */
5337                         if (!ret && pi.prev_ga_tag) {
5338                                 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
5339                                 struct kvm_vcpu *vcpu;
5340
5341                                 vcpu = kvm_get_vcpu_by_id(kvm, id);
5342                                 if (vcpu)
5343                                         svm_ir_list_del(to_svm(vcpu), &pi);
5344                         }
5345                 }
5346
5347                 if (!ret && svm) {
5348                         trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
5349                                                  e->gsi, vcpu_info.vector,
5350                                                  vcpu_info.pi_desc_addr, set);
5351                 }
5352
5353                 if (ret < 0) {
5354                         pr_err("%s: failed to update PI IRTE\n", __func__);
5355                         goto out;
5356                 }
5357         }
5358
5359         ret = 0;
5360 out:
5361         srcu_read_unlock(&kvm->irq_srcu, idx);
5362         return ret;
5363 }
5364
5365 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
5366 {
5367         struct vcpu_svm *svm = to_svm(vcpu);
5368         struct vmcb *vmcb = svm->vmcb;
5369         int ret;
5370         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5371               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5372         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5373
5374         return ret;
5375 }
5376
5377 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5378 {
5379         struct vcpu_svm *svm = to_svm(vcpu);
5380
5381         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5382 }
5383
5384 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5385 {
5386         struct vcpu_svm *svm = to_svm(vcpu);
5387
5388         if (masked) {
5389                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
5390                 set_intercept(svm, INTERCEPT_IRET);
5391         } else {
5392                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
5393                 clr_intercept(svm, INTERCEPT_IRET);
5394         }
5395 }
5396
5397 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5398 {
5399         struct vcpu_svm *svm = to_svm(vcpu);
5400         struct vmcb *vmcb = svm->vmcb;
5401         int ret;
5402
5403         if (!gif_set(svm) ||
5404              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5405                 return 0;
5406
5407         ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
5408
5409         if (is_guest_mode(vcpu))
5410                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5411
5412         return ret;
5413 }
5414
5415 static void enable_irq_window(struct kvm_vcpu *vcpu)
5416 {
5417         struct vcpu_svm *svm = to_svm(vcpu);
5418
5419         if (kvm_vcpu_apicv_active(vcpu))
5420                 return;
5421
5422         /*
5423          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5424          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
5425          * get that intercept, this function will be called again though and
5426          * we'll get the vintr intercept. However, if the vGIF feature is
5427          * enabled, the STGI interception will not occur. Enable the irq
5428          * window under the assumption that the hardware will set the GIF.
5429          */
5430         if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
5431                 svm_set_vintr(svm);
5432                 svm_inject_irq(svm, 0x0);
5433         }
5434 }
5435
5436 static void enable_nmi_window(struct kvm_vcpu *vcpu)
5437 {
5438         struct vcpu_svm *svm = to_svm(vcpu);
5439
5440         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5441             == HF_NMI_MASK)
5442                 return; /* IRET will cause a vm exit */
5443
5444         if (!gif_set(svm)) {
5445                 if (vgif_enabled(svm))
5446                         set_intercept(svm, INTERCEPT_STGI);
5447                 return; /* STGI will cause a vm exit */
5448         }
5449
5450         if (svm->nested.exit_required)
5451                 return; /* we're not going to run the guest yet */
5452
5453         /*
5454          * Something prevents the NMI from being injected. Single step over the
5455          * possible problem (IRET or exception injection or interrupt shadow).
5456          */
5457         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
5458         svm->nmi_singlestep = true;
5459         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
5460 }
5461
5462 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5463 {
5464         return 0;
5465 }
5466
5467 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5468 {
5469         return 0;
5470 }
5471
5472 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5473 {
5474         struct vcpu_svm *svm = to_svm(vcpu);
5475
5476         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5477                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5478         else
5479                 svm->asid_generation--;
5480 }
5481
5482 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
5483 {
5484         struct vcpu_svm *svm = to_svm(vcpu);
5485
5486         invlpga(gva, svm->vmcb->control.asid);
5487 }
5488
5489 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5490 {
5491 }
5492
5493 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5494 {
5495         struct vcpu_svm *svm = to_svm(vcpu);
5496
5497         if (svm_nested_virtualize_tpr(vcpu))
5498                 return;
5499
5500         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
5501                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
5502                 kvm_set_cr8(vcpu, cr8);
5503         }
5504 }
5505
5506 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5507 {
5508         struct vcpu_svm *svm = to_svm(vcpu);
5509         u64 cr8;
5510
5511         if (svm_nested_virtualize_tpr(vcpu) ||
5512             kvm_vcpu_apicv_active(vcpu))
5513                 return;
5514
5515         cr8 = kvm_get_cr8(vcpu);
5516         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5517         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5518 }
5519
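     /*
      * Complete event delivery after #VMEXIT: clear NMI blocking once the
      * guest has made progress past the IRET, and re-queue any event reported
      * in exit_int_info so it gets re-injected on the next VMRUN.
      */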
5520 static void svm_complete_interrupts(struct vcpu_svm *svm)
5521 {
5522         u8 vector;
5523         int type;
5524         u32 exitintinfo = svm->vmcb->control.exit_int_info;
5525         unsigned int3_injected = svm->int3_injected;
5526
5527         svm->int3_injected = 0;
5528
5529         /*
5530          * If we've made progress since setting HF_IRET_MASK, we've
5531          * executed an IRET and can allow NMI injection.
5532          */
5533         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5534             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
5535                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
5536                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5537         }
5538
5539         svm->vcpu.arch.nmi_injected = false;
5540         kvm_clear_exception_queue(&svm->vcpu);
5541         kvm_clear_interrupt_queue(&svm->vcpu);
5542
5543         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5544                 return;
5545
5546         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5547
5548         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5549         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5550
5551         switch (type) {
5552         case SVM_EXITINTINFO_TYPE_NMI:
5553                 svm->vcpu.arch.nmi_injected = true;
5554                 break;
5555         case SVM_EXITINTINFO_TYPE_EXEPT:
5556                 /*
5557                  * In case of software exceptions, do not reinject the vector,
5558                  * but re-execute the instruction instead. Rewind RIP first
5559                  * if we emulated INT3 before.
5560                  */
5561                 if (kvm_exception_is_soft(vector)) {
5562                         if (vector == BP_VECTOR && int3_injected &&
5563                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5564                                 kvm_rip_write(&svm->vcpu,
5565                                               kvm_rip_read(&svm->vcpu) -
5566                                               int3_injected);
5567                         break;
5568                 }
5569                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5570                         u32 err = svm->vmcb->control.exit_int_info_err;
5571                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
5572
5573                 } else
5574                         kvm_requeue_exception(&svm->vcpu, vector);
5575                 break;
5576         case SVM_EXITINTINFO_TYPE_INTR:
5577                 kvm_queue_interrupt(&svm->vcpu, vector, false);
5578                 break;
5579         default:
5580                 break;
5581         }
5582 }
5583
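     /*
      * Cancel a pending injection by moving event_inj back into exit_int_info
      * and letting svm_complete_interrupts() re-queue the event.
      */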
5584 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5585 {
5586         struct vcpu_svm *svm = to_svm(vcpu);
5587         struct vmcb_control_area *control = &svm->vmcb->control;
5588
5589         control->exit_int_info = control->event_inj;
5590         control->exit_int_info_err = control->event_inj_err;
5591         control->event_inj = 0;
5592         svm_complete_interrupts(svm);
5593 }
5594
5595 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5596 {
5597         struct vcpu_svm *svm = to_svm(vcpu);
5598
5599         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5600         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5601         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5602
5603         /*
5604          * A vmexit emulation is required before the vcpu can be executed
5605          * again.
5606          */
5607         if (unlikely(svm->nested.exit_required))
5608                 return;
5609
5610         /*
5611          * Disable singlestep if we're injecting an interrupt/exception.
5612          * We don't want our modified rflags to be pushed on the stack where
5613          * we might not be able to easily reset them if we disabled NMI
5614          * singlestep later.
5615          */
5616         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5617                 /*
5618                  * Event injection happens before external interrupts cause a
5619                  * vmexit and interrupts are disabled here, so smp_send_reschedule
5620                  * is enough to force an immediate vmexit.
5621                  */
5622                 disable_nmi_singlestep(svm);
5623                 smp_send_reschedule(vcpu->cpu);
5624         }
5625
5626         pre_svm_run(svm);
5627
5628         sync_lapic_to_cr8(vcpu);
5629
5630         svm->vmcb->save.cr2 = vcpu->arch.cr2;
5631
5632         clgi();
5633         kvm_load_guest_xcr0(vcpu);
5634
5635         if (lapic_in_kernel(vcpu) &&
5636                 vcpu->arch.apic->lapic_timer.timer_advance_ns)
5637                 kvm_wait_lapic_expire(vcpu);
5638
5639         /*
5640          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
5641          * it's non-zero. Since vmentry is serialising on affected CPUs, there
5642          * is no need to worry about the conditional branch over the wrmsr
5643          * being speculatively taken.
5644          */
5645         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
5646
5647         local_irq_enable();
5648
5649         asm volatile (
5650                 "push %%" _ASM_BP "; \n\t"
5651                 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5652                 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5653                 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5654                 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5655                 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5656                 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
5657 #ifdef CONFIG_X86_64
5658                 "mov %c[r8](%[svm]),  %%r8  \n\t"
5659                 "mov %c[r9](%[svm]),  %%r9  \n\t"
5660                 "mov %c[r10](%[svm]), %%r10 \n\t"
5661                 "mov %c[r11](%[svm]), %%r11 \n\t"
5662                 "mov %c[r12](%[svm]), %%r12 \n\t"
5663                 "mov %c[r13](%[svm]), %%r13 \n\t"
5664                 "mov %c[r14](%[svm]), %%r14 \n\t"
5665                 "mov %c[r15](%[svm]), %%r15 \n\t"
5666 #endif
5667
5668                 /* Enter guest mode */
5669                 "push %%" _ASM_AX " \n\t"
5670                 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
5671                 __ex("vmload %%" _ASM_AX) "\n\t"
5672                 __ex("vmrun %%" _ASM_AX) "\n\t"
5673                 __ex("vmsave %%" _ASM_AX) "\n\t"
5674                 "pop %%" _ASM_AX " \n\t"
5675
5676                 /* Save guest registers, load host registers */
5677                 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5678                 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5679                 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5680                 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5681                 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5682                 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
5683 #ifdef CONFIG_X86_64
5684                 "mov %%r8,  %c[r8](%[svm]) \n\t"
5685                 "mov %%r9,  %c[r9](%[svm]) \n\t"
5686                 "mov %%r10, %c[r10](%[svm]) \n\t"
5687                 "mov %%r11, %c[r11](%[svm]) \n\t"
5688                 "mov %%r12, %c[r12](%[svm]) \n\t"
5689                 "mov %%r13, %c[r13](%[svm]) \n\t"
5690                 "mov %%r14, %c[r14](%[svm]) \n\t"
5691                 "mov %%r15, %c[r15](%[svm]) \n\t"
5692                 /*
5693                  * Clear host registers marked as clobbered to prevent
5694                  * speculative use.
5695                  */
5696                 "xor %%r8d, %%r8d \n\t"
5697                 "xor %%r9d, %%r9d \n\t"
5698                 "xor %%r10d, %%r10d \n\t"
5699                 "xor %%r11d, %%r11d \n\t"
5700                 "xor %%r12d, %%r12d \n\t"
5701                 "xor %%r13d, %%r13d \n\t"
5702                 "xor %%r14d, %%r14d \n\t"
5703                 "xor %%r15d, %%r15d \n\t"
5704 #endif
5705                 "xor %%ebx, %%ebx \n\t"
5706                 "xor %%ecx, %%ecx \n\t"
5707                 "xor %%edx, %%edx \n\t"
5708                 "xor %%esi, %%esi \n\t"
5709                 "xor %%edi, %%edi \n\t"
5710                 "pop %%" _ASM_BP
5711                 :
5712                 : [svm]"a"(svm),
5713                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
5714                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5715                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5716                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5717                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5718                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5719                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
5720 #ifdef CONFIG_X86_64
5721                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5722                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5723                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5724                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5725                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5726                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5727                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5728                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
5729 #endif
5730                 : "cc", "memory"
5731 #ifdef CONFIG_X86_64
5732                 , "rbx", "rcx", "rdx", "rsi", "rdi"
5733                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
5734 #else
5735                 , "ebx", "ecx", "edx", "esi", "edi"
5736 #endif
5737                 );
5738
5739         /* Eliminate branch target predictions from guest mode */
5740         vmexit_fill_RSB();
5741
5742 #ifdef CONFIG_X86_64
5743         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5744 #else
5745         loadsegment(fs, svm->host.fs);
5746 #ifndef CONFIG_X86_32_LAZY_GS
5747         loadsegment(gs, svm->host.gs);
5748 #endif
5749 #endif
5750
5751         /*
5752          * We do not use IBRS in the kernel. If this vCPU has used the
5753          * SPEC_CTRL MSR it may have left it on; save the value and
5754          * turn it off. This is much more efficient than blindly adding
5755          * it to the atomic save/restore list, especially as the former
5756          * (saving guest MSRs on vmexit) doesn't even exist in KVM.
5757          *
5758          * For non-nested case:
5759          * If the L01 MSR bitmap does not intercept the MSR, then we need to
5760          * save it.
5761          *
5762          * For nested case:
5763          * If the L02 MSR bitmap does not intercept the MSR, then we need to
5764          * save it.
5765          */
5766         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
5767                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
5768
5769         reload_tss(vcpu);
5770
5771         local_irq_disable();
5772
5773         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
5774
5775         vcpu->arch.cr2 = svm->vmcb->save.cr2;
5776         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5777         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5778         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5779
5780         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5781                 kvm_before_interrupt(&svm->vcpu);
5782
5783         kvm_put_guest_xcr0(vcpu);
5784         stgi();
5785
5786         /* Any pending NMI will happen here */
5787
5788         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5789                 kvm_after_interrupt(&svm->vcpu);
5790
5791         sync_cr8_to_lapic(vcpu);
5792
5793         svm->next_rip = 0;
5794
5795         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5796
5797         /* If the exit was due to a #PF, check for an async PF reason. */
5798         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
5799                 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
5800
5801         if (npt_enabled) {
5802                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5803                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5804         }
5805
5806         /*
5807          * We need to handle MC intercepts here before the vcpu has a chance to
5808          * change the physical cpu
5809          */
5810         if (unlikely(svm->vmcb->control.exit_code ==
5811                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
5812                 svm_handle_mce(svm);
5813
5814         mark_all_clean(svm->vmcb);
5815 }
5816 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
5817
5818 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5819 {
5820         struct vcpu_svm *svm = to_svm(vcpu);
5821
5822         svm->vmcb->save.cr3 = __sme_set(root);
5823         mark_dirty(svm->vmcb, VMCB_CR);
5824 }
5825
5826 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5827 {
5828         struct vcpu_svm *svm = to_svm(vcpu);
5829
5830         svm->vmcb->control.nested_cr3 = __sme_set(root);
5831         mark_dirty(svm->vmcb, VMCB_NPT);
5832
5833         /* Also sync guest cr3 here in case we live migrate */
5834         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
5835         mark_dirty(svm->vmcb, VMCB_CR);
5836 }
5837
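     /* Report whether SVM has been disabled by the BIOS via MSR_VM_CR. */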
5838 static int is_disabled(void)
5839 {
5840         u64 vm_cr;
5841
5842         rdmsrl(MSR_VM_CR, vm_cr);
5843         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5844                 return 1;
5845
5846         return 0;
5847 }
5848
5849 static void
5850 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5851 {
5852         /*
5853          * Patch in the VMMCALL instruction:
5854          */
5855         hypercall[0] = 0x0f;
5856         hypercall[1] = 0x01;
5857         hypercall[2] = 0xd9;
5858 }
5859
5860 static int __init svm_check_processor_compat(void)
5861 {
5862         return 0;
5863 }
5864
5865 static bool svm_cpu_has_accelerated_tpr(void)
5866 {
5867         return false;
5868 }
5869
5870 static bool svm_has_emulated_msr(int index)
5871 {
5872         switch (index) {
5873         case MSR_IA32_MCG_EXT_CTL:
5874         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
5875                 return false;
5876         default:
5877                 break;
5878         }
5879
5880         return true;
5881 }
5882
5883 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5884 {
5885         return 0;
5886 }
5887
5888 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5889 {
5890         struct vcpu_svm *svm = to_svm(vcpu);
5891
5892         /* Update nrips enabled cache */
5893         svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
5894
5895         if (!kvm_vcpu_apicv_active(vcpu))
5896                 return;
5897
5898         guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
5899 }
5900
5901 #define F(x) bit(X86_FEATURE_##x)
5902
5903 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5904 {
5905         switch (func) {
5906         case 0x1:
5907                 if (avic)
5908                         entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5909                 break;
5910         case 0x80000001:
5911                 if (nested)
5912                         entry->ecx |= (1 << 2); /* Set SVM bit */
5913                 break;
5914         case 0x80000008:
5915                 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5916                      boot_cpu_has(X86_FEATURE_AMD_SSBD))
5917                         entry->ebx |= F(VIRT_SSBD);
5918                 break;
5919         case 0x8000000A:
5920                 entry->eax = 1; /* SVM revision 1 */
5921                 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5922                                    ASID emulation to nested SVM */
5923                 entry->ecx = 0; /* Reserved */
5924                 entry->edx = 0; /* By default do not support any
5925                                    additional features */
5926
5927                 /* Support next_rip if host supports it */
5928                 if (boot_cpu_has(X86_FEATURE_NRIPS))
5929                         entry->edx |= F(NRIPS);
5930
5931                 /* Support NPT for the guest if enabled */
5932                 if (npt_enabled)
5933                         entry->edx |= F(NPT);
5934
5935                 break;
5936         case 0x8000001F:
5937                 /* Support memory encryption cpuid if host supports it */
5938                 if (boot_cpu_has(X86_FEATURE_SEV))
5939                         cpuid(0x8000001f, &entry->eax, &entry->ebx,
5940                                 &entry->ecx, &entry->edx);
5941
5942         }
5943 }
5944
5945 static int svm_get_lpage_level(void)
5946 {
5947         return PT_PDPE_LEVEL;
5948 }
5949
5950 static bool svm_rdtscp_supported(void)
5951 {
5952         return boot_cpu_has(X86_FEATURE_RDTSCP);
5953 }
5954
5955 static bool svm_invpcid_supported(void)
5956 {
5957         return false;
5958 }
5959
5960 static bool svm_mpx_supported(void)
5961 {
5962         return false;
5963 }
5964
5965 static bool svm_xsaves_supported(void)
5966 {
5967         return false;
5968 }
5969
5970 static bool svm_umip_emulated(void)
5971 {
5972         return false;
5973 }
5974
5975 static bool svm_pt_supported(void)
5976 {
5977         return false;
5978 }
5979
5980 static bool svm_has_wbinvd_exit(void)
5981 {
5982         return true;
5983 }
5984
5985 #define PRE_EX(exit)  { .exit_code = (exit), \
5986                         .stage = X86_ICPT_PRE_EXCEPT, }
5987 #define POST_EX(exit) { .exit_code = (exit), \
5988                         .stage = X86_ICPT_POST_EXCEPT, }
5989 #define POST_MEM(exit) { .exit_code = (exit), \
5990                         .stage = X86_ICPT_POST_MEMACCESS, }
5991
5992 static const struct __x86_intercept {
5993         u32 exit_code;
5994         enum x86_intercept_stage stage;
5995 } x86_intercept_map[] = {
5996         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
5997         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
5998         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
5999         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
6000         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
6001         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
6002         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
6003         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
6004         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
6005         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
6006         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
6007         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
6008         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
6009         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
6010         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
6011         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
6012         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
6013         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
6014         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
6015         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
6016         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
6017         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
6018         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
6019         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
6020         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
6021         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
6022         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
6023         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
6024         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
6025         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
6026         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
6027         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
6028         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
6029         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
6030         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
6031         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
6032         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
6033         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
6034         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
6035         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
6036         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
6037         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
6038         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
6039         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
6040         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
6041         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
6042         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
6043 };
6044
6045 #undef PRE_EX
6046 #undef POST_EX
6047 #undef POST_MEM
6048
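/*
 * Called by the instruction emulator when emulating on behalf of a nested
 * guest: translate the emulator intercept into the SVM exit code and
 * exit_info values L1 expects, then ask nested_svm_exit_handled() whether
 * L1 intercepts it.  Returns X86EMUL_INTERCEPTED to abort emulation when
 * L1 wants the exit, X86EMUL_CONTINUE otherwise.
 */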
6049 static int svm_check_intercept(struct kvm_vcpu *vcpu,
6050                                struct x86_instruction_info *info,
6051                                enum x86_intercept_stage stage)
6052 {
6053         struct vcpu_svm *svm = to_svm(vcpu);
6054         int vmexit, ret = X86EMUL_CONTINUE;
6055         struct __x86_intercept icpt_info;
6056         struct vmcb *vmcb = svm->vmcb;
6057
6058         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
6059                 goto out;
6060
6061         icpt_info = x86_intercept_map[info->intercept];
6062
6063         if (stage != icpt_info.stage)
6064                 goto out;
6065
6066         switch (icpt_info.exit_code) {
6067         case SVM_EXIT_READ_CR0:
6068                 if (info->intercept == x86_intercept_cr_read)
6069                         icpt_info.exit_code += info->modrm_reg;
6070                 break;
6071         case SVM_EXIT_WRITE_CR0: {
6072                 unsigned long cr0, val;
6073                 u64 intercept;
6074
6075                 if (info->intercept == x86_intercept_cr_write)
6076                         icpt_info.exit_code += info->modrm_reg;
6077
6078                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
6079                     info->intercept == x86_intercept_clts)
6080                         break;
6081
6082                 intercept = svm->nested.intercept;
6083
6084                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
6085                         break;
6086
6087                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
6088                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
6089
6090                 if (info->intercept == x86_intercept_lmsw) {
6091                         cr0 &= 0xfUL;
6092                         val &= 0xfUL;
6093                         /* lmsw can't clear PE - catch this here */
6094                         if (cr0 & X86_CR0_PE)
6095                                 val |= X86_CR0_PE;
6096                 }
6097
6098                 if (cr0 ^ val)
6099                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
6100
6101                 break;
6102         }
6103         case SVM_EXIT_READ_DR0:
6104         case SVM_EXIT_WRITE_DR0:
6105                 icpt_info.exit_code += info->modrm_reg;
6106                 break;
6107         case SVM_EXIT_MSR:
6108                 if (info->intercept == x86_intercept_wrmsr)
6109                         vmcb->control.exit_info_1 = 1;
6110                 else
6111                         vmcb->control.exit_info_1 = 0;
6112                 break;
6113         case SVM_EXIT_PAUSE:
6114                 /*
6115                  * The emulator only reports this for NOP, but PAUSE is
6116                  * encoded as REP NOP, so check for the REP prefix here.
6117                  */
6118                 if (info->rep_prefix != REPE_PREFIX)
6119                         goto out;
6120                 break;
6121         case SVM_EXIT_IOIO: {
6122                 u64 exit_info;
6123                 u32 bytes;
6124
6125                 if (info->intercept == x86_intercept_in ||
6126                     info->intercept == x86_intercept_ins) {
6127                         exit_info = ((info->src_val & 0xffff) << 16) |
6128                                 SVM_IOIO_TYPE_MASK;
6129                         bytes = info->dst_bytes;
6130                 } else {
6131                         exit_info = (info->dst_val & 0xffff) << 16;
6132                         bytes = info->src_bytes;
6133                 }
6134
6135                 if (info->intercept == x86_intercept_outs ||
6136                     info->intercept == x86_intercept_ins)
6137                         exit_info |= SVM_IOIO_STR_MASK;
6138
6139                 if (info->rep_prefix)
6140                         exit_info |= SVM_IOIO_REP_MASK;
6141
6142                 bytes = min(bytes, 4u);
6143
6144                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
6145
6146                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
6147
6148                 vmcb->control.exit_info_1 = exit_info;
6149                 vmcb->control.exit_info_2 = info->next_rip;
6150
6151                 break;
6152         }
6153         default:
6154                 break;
6155         }
6156
6157         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
6158         if (static_cpu_has(X86_FEATURE_NRIPS))
6159                 vmcb->control.next_rip  = info->next_rip;
6160         vmcb->control.exit_code = icpt_info.exit_code;
6161         vmexit = nested_svm_exit_handled(svm);
6162
6163         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
6164                                            : X86EMUL_CONTINUE;
6165
6166 out:
6167         return ret;
6168 }
6169
6170 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6171 {
6172
6173 }
6174
6175 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
6176 {
6177         if (pause_filter_thresh)
6178                 shrink_ple_window(vcpu);
6179 }
6180
6181 static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
6182 {
6183         if (avic_handle_apic_id_update(vcpu) != 0)
6184                 return;
6185         avic_handle_dfr_update(vcpu);
6186         avic_handle_ldr_update(vcpu);
6187 }
6188
6189 static void svm_setup_mce(struct kvm_vcpu *vcpu)
6190 {
6191         /* [63:9] are reserved. */
6192         vcpu->arch.mcg_cap &= 0x1ff;
6193 }
6194
6195 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
6196 {
6197         struct vcpu_svm *svm = to_svm(vcpu);
6198
6199         /* Per APM Vol.2 15.22.2 "Response to SMI" */
6200         if (!gif_set(svm))
6201                 return 0;
6202
6203         if (is_guest_mode(&svm->vcpu) &&
6204             svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
6205                 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
6206                 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
6207                 svm->nested.exit_required = true;
6208                 return 0;
6209         }
6210
6211         return 1;
6212 }
6213
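/*
 * On SMM entry, if the vCPU is in guest mode, record that fact and the L2
 * VMCB address in the SMM state-save area, sync RAX/RSP/RIP into the VMCB
 * and leave guest mode via nested_svm_vmexit().
 */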
6214 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
6215 {
6216         struct vcpu_svm *svm = to_svm(vcpu);
6217         int ret;
6218
6219         if (is_guest_mode(vcpu)) {
6220                 /* FED8h - SVM Guest */
6221                 put_smstate(u64, smstate, 0x7ed8, 1);
6222                 /* FEE0h - SVM Guest VMCB Physical Address */
6223                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
6224
6225                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
6226                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
6227                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
6228
6229                 ret = nested_svm_vmexit(svm);
6230                 if (ret)
6231                         return ret;
6232         }
6233         return 0;
6234 }
6235
6236 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
6237 {
6238         struct vcpu_svm *svm = to_svm(vcpu);
6239         struct vmcb *nested_vmcb;
6240         struct kvm_host_map map;
6241         u64 guest;
6242         u64 vmcb;
6243
6244         guest = GET_SMSTATE(u64, smstate, 0x7ed8);
6245         vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
6246
6247         if (guest) {
6248                 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
6249                         return 1;
6250                 nested_vmcb = map.hva;
6251                 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
6252         }
6253         return 0;
6254 }
6255
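/*
 * Called when an SMI is pending but cannot be injected yet: if GIF is clear
 * and vGIF is in use, intercept STGI so that the guest setting GIF causes a
 * VM exit, at which point the SMI can be injected.
 */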
6256 static int enable_smi_window(struct kvm_vcpu *vcpu)
6257 {
6258         struct vcpu_svm *svm = to_svm(vcpu);
6259
6260         if (!gif_set(svm)) {
6261                 if (vgif_enabled(svm))
6262                         set_intercept(svm, INTERCEPT_STGI);
6263                 /* STGI will cause a vm exit */
6264                 return 1;
6265         }
6266         return 0;
6267 }
6268
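/*
 * Allocate an unused ASID from the SEV range; returns the 1-based ASID on
 * success or -EBUSY when all SEV ASIDs are in use.
 */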
6269 static int sev_asid_new(void)
6270 {
6271         int pos;
6272
6273         /*
6274          * A SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
6275          */
6276         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
6277         if (pos >= max_sev_asid)
6278                 return -EBUSY;
6279
6280         set_bit(pos, sev_asid_bitmap);
6281         return pos + 1;
6282 }
6283
6284 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6285 {
6286         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6287         int asid, ret;
6288
6289         ret = -EBUSY;
6290         if (unlikely(sev->active))
6291                 return ret;
6292
6293         asid = sev_asid_new();
6294         if (asid < 0)
6295                 return ret;
6296
6297         ret = sev_platform_init(&argp->error);
6298         if (ret)
6299                 goto e_free;
6300
6301         sev->active = true;
6302         sev->asid = asid;
6303         INIT_LIST_HEAD(&sev->regions_list);
6304
6305         return 0;
6306
6307 e_free:
6308         __sev_asid_free(asid);
6309         return ret;
6310 }
6311
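/*
 * Bind a firmware handle to this guest's ASID: flush caches on all CPUs and
 * request a firmware DF_FLUSH first, then issue the ACTIVATE command.
 */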
6312 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
6313 {
6314         struct sev_data_activate *data;
6315         int asid = sev_get_asid(kvm);
6316         int ret;
6317
6318         wbinvd_on_all_cpus();
6319
6320         ret = sev_guest_df_flush(error);
6321         if (ret)
6322                 return ret;
6323
6324         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6325         if (!data)
6326                 return -ENOMEM;
6327
6328         /* activate ASID on the given handle */
6329         data->handle = handle;
6330         data->asid   = asid;
6331         ret = sev_guest_activate(data, error);
6332         kfree(data);
6333
6334         return ret;
6335 }
6336
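/*
 * Issue an SEV firmware command through the SEV device file descriptor that
 * userspace supplied with the memory encryption ioctl.
 */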
6337 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
6338 {
6339         struct fd f;
6340         int ret;
6341
6342         f = fdget(fd);
6343         if (!f.file)
6344                 return -EBADF;
6345
6346         ret = sev_issue_cmd_external_user(f.file, id, data, error);
6347
6348         fdput(f);
6349         return ret;
6350 }
6351
6352 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
6353 {
6354         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6355
6356         return __sev_issue_cmd(sev->fd, id, data, error);
6357 }
6358
6359 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
6360 {
6361         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6362         struct sev_data_launch_start *start;
6363         struct kvm_sev_launch_start params;
6364         void *dh_blob, *session_blob;
6365         int *error = &argp->error;
6366         int ret;
6367
6368         if (!sev_guest(kvm))
6369                 return -ENOTTY;
6370
6371         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6372                 return -EFAULT;
6373
6374         start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
6375         if (!start)
6376                 return -ENOMEM;
6377
6378         dh_blob = NULL;
6379         if (params.dh_uaddr) {
6380                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
6381                 if (IS_ERR(dh_blob)) {
6382                         ret = PTR_ERR(dh_blob);
6383                         goto e_free;
6384                 }
6385
6386                 start->dh_cert_address = __sme_set(__pa(dh_blob));
6387                 start->dh_cert_len = params.dh_len;
6388         }
6389
6390         session_blob = NULL;
6391         if (params.session_uaddr) {
6392                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
6393                 if (IS_ERR(session_blob)) {
6394                         ret = PTR_ERR(session_blob);
6395                         goto e_free_dh;
6396                 }
6397
6398                 start->session_address = __sme_set(__pa(session_blob));
6399                 start->session_len = params.session_len;
6400         }
6401
6402         start->handle = params.handle;
6403         start->policy = params.policy;
6404
6405         /* create memory encryption context */
6406         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
6407         if (ret)
6408                 goto e_free_session;
6409
6410         /* Bind ASID to this guest */
6411         ret = sev_bind_asid(kvm, start->handle, error);
6412         if (ret)
6413                 goto e_free_session;
6414
6415         /* return handle to userspace */
6416         params.handle = start->handle;
6417         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
6418                 sev_unbind_asid(kvm, start->handle);
6419                 ret = -EFAULT;
6420                 goto e_free_session;
6421         }
6422
6423         sev->handle = start->handle;
6424         sev->fd = argp->sev_fd;
6425
6426 e_free_session:
6427         kfree(session_blob);
6428 e_free_dh:
6429         kfree(dh_blob);
6430 e_free:
6431         kfree(start);
6432         return ret;
6433 }
6434
6435 static unsigned long get_num_contig_pages(unsigned long idx,
6436                                 struct page **inpages, unsigned long npages)
6437 {
6438         unsigned long paddr, next_paddr;
6439         unsigned long i = idx + 1, pages = 1;
6440
6441         /* find the number of contiguous pages starting from idx */
6442         paddr = __sme_page_pa(inpages[idx]);
6443         while (i < npages) {
6444                 next_paddr = __sme_page_pa(inpages[i++]);
6445                 if ((paddr + PAGE_SIZE) == next_paddr) {
6446                         pages++;
6447                         paddr = next_paddr;
6448                         continue;
6449                 }
6450                 break;
6451         }
6452
6453         return pages;
6454 }
6455
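/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the userspace range, flush it from the
 * caches (it may still be cached with C=0) and issue LAUNCH_UPDATE_DATA for
 * each physically contiguous run of pages so the firmware encrypts the
 * memory in place.
 */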
6456 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6457 {
6458         unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
6459         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6460         struct kvm_sev_launch_update_data params;
6461         struct sev_data_launch_update_data *data;
6462         struct page **inpages;
6463         int ret;
6464
6465         if (!sev_guest(kvm))
6466                 return -ENOTTY;
6467
6468         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6469                 return -EFAULT;
6470
6471         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6472         if (!data)
6473                 return -ENOMEM;
6474
6475         vaddr = params.uaddr;
6476         size = params.len;
6477         vaddr_end = vaddr + size;
6478
6479         /* Lock the user memory. */
6480         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6481         if (!inpages) {
6482                 ret = -ENOMEM;
6483                 goto e_free;
6484         }
6485
6486         /*
6487          * The LAUNCH_UPDATE command will perform in-place encryption of the
6488          * memory content (i.e. it will write the same memory region with C=1).
6489          * It's possible that the cache may contain the data with C=0, i.e.,
6490          * unencrypted, so invalidate it first.
6491          */
6492         sev_clflush_pages(inpages, npages);
6493
6494         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6495                 int offset, len;
6496
6497                 /*
6498                  * If the user buffer is not page-aligned, calculate the offset
6499                  * within the page.
6500                  */
6501                 offset = vaddr & (PAGE_SIZE - 1);
6502
6503                 /* Calculate the number of pages that can be encrypted in one go. */
6504                 pages = get_num_contig_pages(i, inpages, npages);
6505
6506                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6507
6508                 data->handle = sev->handle;
6509                 data->len = len;
6510                 data->address = __sme_page_pa(inpages[i]) + offset;
6511                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6512                 if (ret)
6513                         goto e_unpin;
6514
6515                 size -= len;
6516                 next_vaddr = vaddr + len;
6517         }
6518
6519 e_unpin:
6520         /* The memory content has been updated; mark the pages dirty and accessed. */
6521         for (i = 0; i < npages; i++) {
6522                 set_page_dirty_lock(inpages[i]);
6523                 mark_page_accessed(inpages[i]);
6524         }
6525         /* unlock the user pages */
6526         sev_unpin_memory(kvm, inpages, npages);
6527 e_free:
6528         kfree(data);
6529         return ret;
6530 }
6531
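/*
 * KVM_SEV_LAUNCH_MEASURE: return the launch measurement blob to userspace.
 * A zero params.len only queries the required blob length, which is passed
 * back in params.len.
 */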
6532 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6533 {
6534         void __user *measure = (void __user *)(uintptr_t)argp->data;
6535         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6536         struct sev_data_launch_measure *data;
6537         struct kvm_sev_launch_measure params;
6538         void __user *p = NULL;
6539         void *blob = NULL;
6540         int ret;
6541
6542         if (!sev_guest(kvm))
6543                 return -ENOTTY;
6544
6545         if (copy_from_user(&params, measure, sizeof(params)))
6546                 return -EFAULT;
6547
6548         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6549         if (!data)
6550                 return -ENOMEM;
6551
6552         /* User wants to query the blob length */
6553         if (!params.len)
6554                 goto cmd;
6555
6556         p = (void __user *)(uintptr_t)params.uaddr;
6557         if (p) {
6558                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6559                         ret = -EINVAL;
6560                         goto e_free;
6561                 }
6562
6563                 ret = -ENOMEM;
6564                 blob = kmalloc(params.len, GFP_KERNEL);
6565                 if (!blob)
6566                         goto e_free;
6567
6568                 data->address = __psp_pa(blob);
6569                 data->len = params.len;
6570         }
6571
6572 cmd:
6573         data->handle = sev->handle;
6574         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6575
6576         /*
6577          * If only the blob length was queried, the FW has responded with the expected length.
6578          */
6579         if (!params.len)
6580                 goto done;
6581
6582         if (ret)
6583                 goto e_free_blob;
6584
6585         if (blob) {
6586                 if (copy_to_user(p, blob, params.len))
6587                         ret = -EFAULT;
6588         }
6589
6590 done:
6591         params.len = data->len;
6592         if (copy_to_user(measure, &params, sizeof(params)))
6593                 ret = -EFAULT;
6594 e_free_blob:
6595         kfree(blob);
6596 e_free:
6597         kfree(data);
6598         return ret;
6599 }
6600
6601 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6602 {
6603         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6604         struct sev_data_launch_finish *data;
6605         int ret;
6606
6607         if (!sev_guest(kvm))
6608                 return -ENOTTY;
6609
6610         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6611         if (!data)
6612                 return -ENOMEM;
6613
6614         data->handle = sev->handle;
6615         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6616
6617         kfree(data);
6618         return ret;
6619 }
6620
6621 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6622 {
6623         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6624         struct kvm_sev_guest_status params;
6625         struct sev_data_guest_status *data;
6626         int ret;
6627
6628         if (!sev_guest(kvm))
6629                 return -ENOTTY;
6630
6631         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6632         if (!data)
6633                 return -ENOMEM;
6634
6635         data->handle = sev->handle;
6636         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6637         if (ret)
6638                 goto e_free;
6639
6640         params.policy = data->policy;
6641         params.state = data->state;
6642         params.handle = data->handle;
6643
6644         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6645                 ret = -EFAULT;
6646 e_free:
6647         kfree(data);
6648         return ret;
6649 }
6650
6651 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6652                                unsigned long dst, int size,
6653                                int *error, bool enc)
6654 {
6655         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6656         struct sev_data_dbg *data;
6657         int ret;
6658
6659         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6660         if (!data)
6661                 return -ENOMEM;
6662
6663         data->handle = sev->handle;
6664         data->dst_addr = dst;
6665         data->src_addr = src;
6666         data->len = size;
6667
6668         ret = sev_issue_cmd(kvm,
6669                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6670                             data, error);
6671         kfree(data);
6672         return ret;
6673 }
6674
6675 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6676                              unsigned long dst_paddr, int sz, int *err)
6677 {
6678         int offset;
6679
6680         /*
6681          * It's safe to read more than was asked for; the caller should ensure
6682          * that the destination has enough space.
6683          */
6684         src_paddr = round_down(src_paddr, 16);
6685         offset = src_paddr & 15;
6686         sz = round_up(sz + offset, 16);
6687
6688         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6689 }
6690
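/*
 * Decrypt guest memory into a userspace buffer.  If the addresses or size
 * are not 16-byte aligned, decrypt into a scratch page first and copy the
 * requested bytes out to userspace from there.
 */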
6691 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6692                                   unsigned long __user dst_uaddr,
6693                                   unsigned long dst_paddr,
6694                                   int size, int *err)
6695 {
6696         struct page *tpage = NULL;
6697         int ret, offset;
6698
6699         /* If the inputs are not 16-byte aligned then use an intermediate buffer */
6700         if (!IS_ALIGNED(dst_paddr, 16) ||
6701             !IS_ALIGNED(paddr,     16) ||
6702             !IS_ALIGNED(size,      16)) {
6703                 tpage = (void *)alloc_page(GFP_KERNEL);
6704                 if (!tpage)
6705                         return -ENOMEM;
6706
6707                 dst_paddr = __sme_page_pa(tpage);
6708         }
6709
6710         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6711         if (ret)
6712                 goto e_free;
6713
6714         if (tpage) {
6715                 offset = paddr & 15;
6716                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6717                                  page_address(tpage) + offset, size))
6718                         ret = -EFAULT;
6719         }
6720
6721 e_free:
6722         if (tpage)
6723                 __free_page(tpage);
6724
6725         return ret;
6726 }
6727
6728 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6729                                   unsigned long __user vaddr,
6730                                   unsigned long dst_paddr,
6731                                   unsigned long __user dst_vaddr,
6732                                   int size, int *error)
6733 {
6734         struct page *src_tpage = NULL;
6735         struct page *dst_tpage = NULL;
6736         int ret, len = size;
6737
6738         /* If source buffer is not aligned then use an intermediate buffer */
6739         if (!IS_ALIGNED(vaddr, 16)) {
6740                 src_tpage = alloc_page(GFP_KERNEL);
6741                 if (!src_tpage)
6742                         return -ENOMEM;
6743
6744                 if (copy_from_user(page_address(src_tpage),
6745                                 (void __user *)(uintptr_t)vaddr, size)) {
6746                         __free_page(src_tpage);
6747                         return -EFAULT;
6748                 }
6749
6750                 paddr = __sme_page_pa(src_tpage);
6751         }
6752
6753         /*
6754          *  If destination buffer or length is not aligned then do read-modify-write:
6755          *   - decrypt destination in an intermediate buffer
6756          *   - copy the source buffer in an intermediate buffer
6757          *   - use the intermediate buffer as source buffer
6758          */
6759         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6760                 int dst_offset;
6761
6762                 dst_tpage = alloc_page(GFP_KERNEL);
6763                 if (!dst_tpage) {
6764                         ret = -ENOMEM;
6765                         goto e_free;
6766                 }
6767
6768                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6769                                         __sme_page_pa(dst_tpage), size, error);
6770                 if (ret)
6771                         goto e_free;
6772
6773                 /*
6774                  *  If the source is a kernel buffer then use memcpy(),
6775                  *  otherwise copy_from_user().
6776                  */
6777                 dst_offset = dst_paddr & 15;
6778
6779                 if (src_tpage)
6780                         memcpy(page_address(dst_tpage) + dst_offset,
6781                                page_address(src_tpage), size);
6782                 else {
6783                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
6784                                            (void __user *)(uintptr_t)vaddr, size)) {
6785                                 ret = -EFAULT;
6786                                 goto e_free;
6787                         }
6788                 }
6789
6790                 paddr = __sme_page_pa(dst_tpage);
6791                 dst_paddr = round_down(dst_paddr, 16);
6792                 len = round_up(size, 16);
6793         }
6794
6795         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6796
6797 e_free:
6798         if (src_tpage)
6799                 __free_page(src_tpage);
6800         if (dst_tpage)
6801                 __free_page(dst_tpage);
6802         return ret;
6803 }
6804
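/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT: walk the
 * source range a page at a time, pinning the source and destination pages,
 * flushing them and issuing the corresponding debug command for each chunk.
 */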
6805 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6806 {
6807         unsigned long vaddr, vaddr_end, next_vaddr;
6808         unsigned long dst_vaddr;
6809         struct page **src_p, **dst_p;
6810         struct kvm_sev_dbg debug;
6811         unsigned long n;
6812         unsigned int size;
6813         int ret;
6814
6815         if (!sev_guest(kvm))
6816                 return -ENOTTY;
6817
6818         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6819                 return -EFAULT;
6820
6821         if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
6822                 return -EINVAL;
6823         if (!debug.dst_uaddr)
6824                 return -EINVAL;
6825
6826         vaddr = debug.src_uaddr;
6827         size = debug.len;
6828         vaddr_end = vaddr + size;
6829         dst_vaddr = debug.dst_uaddr;
6830
6831         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6832                 int len, s_off, d_off;
6833
6834                 /* lock userspace source and destination page */
6835                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6836                 if (!src_p)
6837                         return -EFAULT;
6838
6839                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6840                 if (!dst_p) {
6841                         sev_unpin_memory(kvm, src_p, n);
6842                         return -EFAULT;
6843                 }
6844
6845                 /*
6846                  * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6847                  * memory content (i.e. it will write the same memory region with C=1).
6848                  * It's possible that the cache may contain the data with C=0, i.e.,
6849                  * unencrypted, so invalidate it first.
6850                  */
6851                 sev_clflush_pages(src_p, 1);
6852                 sev_clflush_pages(dst_p, 1);
6853
6854                 /*
6855                  * Since the user buffer may not be page-aligned, calculate the
6856                  * offset within the page.
6857                  */
6858                 s_off = vaddr & ~PAGE_MASK;
6859                 d_off = dst_vaddr & ~PAGE_MASK;
6860                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6861
6862                 if (dec)
6863                         ret = __sev_dbg_decrypt_user(kvm,
6864                                                      __sme_page_pa(src_p[0]) + s_off,
6865                                                      dst_vaddr,
6866                                                      __sme_page_pa(dst_p[0]) + d_off,
6867                                                      len, &argp->error);
6868                 else
6869                         ret = __sev_dbg_encrypt_user(kvm,
6870                                                      __sme_page_pa(src_p[0]) + s_off,
6871                                                      vaddr,
6872                                                      __sme_page_pa(dst_p[0]) + d_off,
6873                                                      dst_vaddr,
6874                                                      len, &argp->error);
6875
6876                 sev_unpin_memory(kvm, src_p, n);
6877                 sev_unpin_memory(kvm, dst_p, n);
6878
6879                 if (ret)
6880                         goto err;
6881
6882                 next_vaddr = vaddr + len;
6883                 dst_vaddr = dst_vaddr + len;
6884                 size -= len;
6885         }
6886 err:
6887         return ret;
6888 }
6889
6890 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6891 {
6892         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6893         struct sev_data_launch_secret *data;
6894         struct kvm_sev_launch_secret params;
6895         struct page **pages;
6896         void *blob, *hdr;
6897         unsigned long n;
6898         int ret, offset;
6899
6900         if (!sev_guest(kvm))
6901                 return -ENOTTY;
6902
6903         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6904                 return -EFAULT;
6905
6906         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6907         if (!pages)
6908                 return -ENOMEM;
6909
6910         /*
6911          * The secret must be copied into a contiguous memory region; let's verify
6912          * that the userspace memory pages are contiguous before issuing the command.
6913          */
6914         if (get_num_contig_pages(0, pages, n) != n) {
6915                 ret = -EINVAL;
6916                 goto e_unpin_memory;
6917         }
6918
6919         ret = -ENOMEM;
6920         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6921         if (!data)
6922                 goto e_unpin_memory;
6923
6924         offset = params.guest_uaddr & (PAGE_SIZE - 1);
6925         data->guest_address = __sme_page_pa(pages[0]) + offset;
6926         data->guest_len = params.guest_len;
6927
6928         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6929         if (IS_ERR(blob)) {
6930                 ret = PTR_ERR(blob);
6931                 goto e_free;
6932         }
6933
6934         data->trans_address = __psp_pa(blob);
6935         data->trans_len = params.trans_len;
6936
6937         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6938         if (IS_ERR(hdr)) {
6939                 ret = PTR_ERR(hdr);
6940                 goto e_free_blob;
6941         }
6942         data->hdr_address = __psp_pa(hdr);
6943         data->hdr_len = params.hdr_len;
6944
6945         data->handle = sev->handle;
6946         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6947
6948         kfree(hdr);
6949
6950 e_free_blob:
6951         kfree(blob);
6952 e_free:
6953         kfree(data);
6954 e_unpin_memory:
6955         sev_unpin_memory(kvm, pages, n);
6956         return ret;
6957 }
6958
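/*
 * Entry point for the KVM_MEMORY_ENCRYPT_OP ioctl: copy in the command
 * header, dispatch to the SEV command handler under kvm->lock, and copy the
 * (possibly updated) header back to userspace.
 */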
6959 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6960 {
6961         struct kvm_sev_cmd sev_cmd;
6962         int r;
6963
6964         if (!svm_sev_enabled())
6965                 return -ENOTTY;
6966
6967         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6968                 return -EFAULT;
6969
6970         mutex_lock(&kvm->lock);
6971
6972         switch (sev_cmd.id) {
6973         case KVM_SEV_INIT:
6974                 r = sev_guest_init(kvm, &sev_cmd);
6975                 break;
6976         case KVM_SEV_LAUNCH_START:
6977                 r = sev_launch_start(kvm, &sev_cmd);
6978                 break;
6979         case KVM_SEV_LAUNCH_UPDATE_DATA:
6980                 r = sev_launch_update_data(kvm, &sev_cmd);
6981                 break;
6982         case KVM_SEV_LAUNCH_MEASURE:
6983                 r = sev_launch_measure(kvm, &sev_cmd);
6984                 break;
6985         case KVM_SEV_LAUNCH_FINISH:
6986                 r = sev_launch_finish(kvm, &sev_cmd);
6987                 break;
6988         case KVM_SEV_GUEST_STATUS:
6989                 r = sev_guest_status(kvm, &sev_cmd);
6990                 break;
6991         case KVM_SEV_DBG_DECRYPT:
6992                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6993                 break;
6994         case KVM_SEV_DBG_ENCRYPT:
6995                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6996                 break;
6997         case KVM_SEV_LAUNCH_SECRET:
6998                 r = sev_launch_secret(kvm, &sev_cmd);
6999                 break;
7000         default:
7001                 r = -EINVAL;
7002                 goto out;
7003         }
7004
7005         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
7006                 r = -EFAULT;
7007
7008 out:
7009         mutex_unlock(&kvm->lock);
7010         return r;
7011 }
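
/*
 * Roughly, userspace drives the SEV handlers above via KVM_MEMORY_ENCRYPT_OP
 * on the VM file descriptor.  Illustrative sketch only (not part of this
 * file); "vm_fd" and "sev_fd" are assumed to be an open VM fd and an open
 * SEV device fd, and error handling is omitted:
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * followed by KVM_SEV_LAUNCH_START, KVM_SEV_LAUNCH_UPDATE_DATA,
 * KVM_SEV_LAUNCH_MEASURE and KVM_SEV_LAUNCH_FINISH, each passing a
 * command-specific parameter struct via cmd.data and checking cmd.error.
 */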
7012
7013 static int svm_register_enc_region(struct kvm *kvm,
7014                                    struct kvm_enc_region *range)
7015 {
7016         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7017         struct enc_region *region;
7018         int ret = 0;
7019
7020         if (!sev_guest(kvm))
7021                 return -ENOTTY;
7022
7023         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
7024                 return -EINVAL;
7025
7026         region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
7027         if (!region)
7028                 return -ENOMEM;
7029
7030         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
7031         if (!region->pages) {
7032                 ret = -ENOMEM;
7033                 goto e_free;
7034         }
7035
7036         /*
7037          * The guest may change the memory encryption attribute from C=0 -> C=1
7038          * or vice versa for this memory range. Let's make sure the caches are
7039          * flushed to ensure that guest data gets written into memory with the
7040          * correct C-bit.
7041          */
7042         sev_clflush_pages(region->pages, region->npages);
7043
7044         region->uaddr = range->addr;
7045         region->size = range->size;
7046
7047         mutex_lock(&kvm->lock);
7048         list_add_tail(&region->list, &sev->regions_list);
7049         mutex_unlock(&kvm->lock);
7050
7051         return ret;
7052
7053 e_free:
7054         kfree(region);
7055         return ret;
7056 }
7057
7058 static struct enc_region *
7059 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
7060 {
7061         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7062         struct list_head *head = &sev->regions_list;
7063         struct enc_region *i;
7064
7065         list_for_each_entry(i, head, list) {
7066                 if (i->uaddr == range->addr &&
7067                     i->size == range->size)
7068                         return i;
7069         }
7070
7071         return NULL;
7072 }
7073
7074
7075 static int svm_unregister_enc_region(struct kvm *kvm,
7076                                      struct kvm_enc_region *range)
7077 {
7078         struct enc_region *region;
7079         int ret;
7080
7081         mutex_lock(&kvm->lock);
7082
7083         if (!sev_guest(kvm)) {
7084                 ret = -ENOTTY;
7085                 goto failed;
7086         }
7087
7088         region = find_enc_region(kvm, range);
7089         if (!region) {
7090                 ret = -EINVAL;
7091                 goto failed;
7092         }
7093
7094         __unregister_enc_region_locked(kvm, region);
7095
7096         mutex_unlock(&kvm->lock);
7097         return 0;
7098
7099 failed:
7100         mutex_unlock(&kvm->lock);
7101         return ret;
7102 }
7103
7104 static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
7105 {
7106         unsigned long cr4 = kvm_read_cr4(vcpu);
7107         bool smep = cr4 & X86_CR4_SMEP;
7108         bool smap = cr4 & X86_CR4_SMAP;
7109         bool is_user = svm_get_cpl(vcpu) == 3;
7110
7111         /*
7112          * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
7113          *
7114          * Erratum:
7115          * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
7116          * it is possible that the CPU microcode implementing DecodeAssist will
7117          * fail to read the bytes of the instruction which caused the #NPF. In
7118          * this case, the GuestIntrBytes field of the VMCB on a VMEXIT will
7119          * incorrectly return 0 instead of the correct guest instruction bytes.
7120          *
7121          * This happens because the CPU microcode that reads the instruction
7122          * bytes uses a special opcode which attempts to read data with CPL=0
7123          * privileges. The microcode reads CS:RIP and, if it hits an SMAP
7124          * fault, it gives up and returns no instruction bytes.
7125          *
7126          * Detection:
7127          * We reach here when the CPU supports DecodeAssist, raised #NPF and
7128          * returned 0 in the GuestIntrBytes field of the VMCB.
7129          * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
7130          * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
7131          * when vCPU CPL==3 (because otherwise the guest would have taken
7132          * an SMEP fault instead of #NPF).
7133          * Otherwise, with vCPU CR4.SMEP=0, the erratum can be triggered at any vCPU CPL.
7134          * As most guests enable SMAP if they have also enabled SMEP, use the above
7135          * logic to minimize false positives in detecting the erratum while
7136          * still preserving semantic correctness in all cases.
7137          *
7138          * Workaround:
7139          * To determine what instruction the guest was executing, the hypervisor
7140          * will have to decode the instruction at the instruction pointer.
7141          *
7142          * In a non-SEV guest, the hypervisor will be able to read the guest
7143          * memory and decode the instruction at the instruction pointer when
7144          * insn_len is zero, so we return true to indicate that decoding is possible.
7145          *
7146          * But in an SEV guest, the guest memory is encrypted with a
7147          * guest-specific key and the hypervisor will not be able to decode
7148          * the instruction, so we cannot work around the erratum. Let's
7149          * print an error and request that the guest be killed.
7150          */
7151         if (smap && (!smep || is_user)) {
7152                 if (!sev_guest(vcpu->kvm))
7153                         return true;
7154
7155                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
7156                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7157         }
7158
7159         return false;
7160 }
7161
7162 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
7163 {
7164         struct vcpu_svm *svm = to_svm(vcpu);
7165
7166         /*
7167          * TODO: The last condition latches INIT signals on the vCPU when the
7168          * vCPU is in guest mode and vmcb12 defines an intercept on INIT.
7169          * To properly emulate the INIT intercept, SVM should implement
7170          * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
7171          * there if an INIT signal is pending.
7172          */
7173         return !gif_set(svm) ||
7174                    (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
7175 }
7176
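/* SVM implementation of the kvm_x86_ops callbacks, registered via kvm_init() below. */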
7177 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7178         .cpu_has_kvm_support = has_svm,
7179         .disabled_by_bios = is_disabled,
7180         .hardware_setup = svm_hardware_setup,
7181         .hardware_unsetup = svm_hardware_unsetup,
7182         .check_processor_compatibility = svm_check_processor_compat,
7183         .hardware_enable = svm_hardware_enable,
7184         .hardware_disable = svm_hardware_disable,
7185         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
7186         .has_emulated_msr = svm_has_emulated_msr,
7187
7188         .vcpu_create = svm_create_vcpu,
7189         .vcpu_free = svm_free_vcpu,
7190         .vcpu_reset = svm_vcpu_reset,
7191
7192         .vm_alloc = svm_vm_alloc,
7193         .vm_free = svm_vm_free,
7194         .vm_init = avic_vm_init,
7195         .vm_destroy = svm_vm_destroy,
7196
7197         .prepare_guest_switch = svm_prepare_guest_switch,
7198         .vcpu_load = svm_vcpu_load,
7199         .vcpu_put = svm_vcpu_put,
7200         .vcpu_blocking = svm_vcpu_blocking,
7201         .vcpu_unblocking = svm_vcpu_unblocking,
7202
7203         .update_bp_intercept = update_bp_intercept,
7204         .get_msr_feature = svm_get_msr_feature,
7205         .get_msr = svm_get_msr,
7206         .set_msr = svm_set_msr,
7207         .get_segment_base = svm_get_segment_base,
7208         .get_segment = svm_get_segment,
7209         .set_segment = svm_set_segment,
7210         .get_cpl = svm_get_cpl,
7211         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
7212         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
7213         .decache_cr3 = svm_decache_cr3,
7214         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
7215         .set_cr0 = svm_set_cr0,
7216         .set_cr3 = svm_set_cr3,
7217         .set_cr4 = svm_set_cr4,
7218         .set_efer = svm_set_efer,
7219         .get_idt = svm_get_idt,
7220         .set_idt = svm_set_idt,
7221         .get_gdt = svm_get_gdt,
7222         .set_gdt = svm_set_gdt,
7223         .get_dr6 = svm_get_dr6,
7224         .set_dr6 = svm_set_dr6,
7225         .set_dr7 = svm_set_dr7,
7226         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
7227         .cache_reg = svm_cache_reg,
7228         .get_rflags = svm_get_rflags,
7229         .set_rflags = svm_set_rflags,
7230
7231         .tlb_flush = svm_flush_tlb,
7232         .tlb_flush_gva = svm_flush_tlb_gva,
7233
7234         .run = svm_vcpu_run,
7235         .handle_exit = handle_exit,
7236         .skip_emulated_instruction = skip_emulated_instruction,
7237         .set_interrupt_shadow = svm_set_interrupt_shadow,
7238         .get_interrupt_shadow = svm_get_interrupt_shadow,
7239         .patch_hypercall = svm_patch_hypercall,
7240         .set_irq = svm_set_irq,
7241         .set_nmi = svm_inject_nmi,
7242         .queue_exception = svm_queue_exception,
7243         .cancel_injection = svm_cancel_injection,
7244         .interrupt_allowed = svm_interrupt_allowed,
7245         .nmi_allowed = svm_nmi_allowed,
7246         .get_nmi_mask = svm_get_nmi_mask,
7247         .set_nmi_mask = svm_set_nmi_mask,
7248         .enable_nmi_window = enable_nmi_window,
7249         .enable_irq_window = enable_irq_window,
7250         .update_cr8_intercept = update_cr8_intercept,
7251         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
7252         .get_enable_apicv = svm_get_enable_apicv,
7253         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
7254         .load_eoi_exitmap = svm_load_eoi_exitmap,
7255         .hwapic_irr_update = svm_hwapic_irr_update,
7256         .hwapic_isr_update = svm_hwapic_isr_update,
7257         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
7258         .apicv_post_state_restore = avic_post_state_restore,
7259
7260         .set_tss_addr = svm_set_tss_addr,
7261         .set_identity_map_addr = svm_set_identity_map_addr,
7262         .get_tdp_level = get_npt_level,
7263         .get_mt_mask = svm_get_mt_mask,
7264
7265         .get_exit_info = svm_get_exit_info,
7266
7267         .get_lpage_level = svm_get_lpage_level,
7268
7269         .cpuid_update = svm_cpuid_update,
7270
7271         .rdtscp_supported = svm_rdtscp_supported,
7272         .invpcid_supported = svm_invpcid_supported,
7273         .mpx_supported = svm_mpx_supported,
7274         .xsaves_supported = svm_xsaves_supported,
7275         .umip_emulated = svm_umip_emulated,
7276         .pt_supported = svm_pt_supported,
7277
7278         .set_supported_cpuid = svm_set_supported_cpuid,
7279
7280         .has_wbinvd_exit = svm_has_wbinvd_exit,
7281
7282         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7283         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7284
7285         .set_tdp_cr3 = set_tdp_cr3,
7286
7287         .check_intercept = svm_check_intercept,
7288         .handle_exit_irqoff = svm_handle_exit_irqoff,
7289
7290         .request_immediate_exit = __kvm_request_immediate_exit,
7291
7292         .sched_in = svm_sched_in,
7293
7294         .pmu_ops = &amd_pmu_ops,
7295         .deliver_posted_interrupt = svm_deliver_avic_intr,
7296         .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
7297         .update_pi_irte = svm_update_pi_irte,
7298         .setup_mce = svm_setup_mce,
7299
7300         .smi_allowed = svm_smi_allowed,
7301         .pre_enter_smm = svm_pre_enter_smm,
7302         .pre_leave_smm = svm_pre_leave_smm,
7303         .enable_smi_window = enable_smi_window,
7304
7305         .mem_enc_op = svm_mem_enc_op,
7306         .mem_enc_reg_region = svm_register_enc_region,
7307         .mem_enc_unreg_region = svm_unregister_enc_region,
7308
7309         .nested_enable_evmcs = NULL,
7310         .nested_get_evmcs_version = NULL,
7311
7312         .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7313
7314         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
7315 };
7316
7317 static int __init svm_init(void)
7318 {
7319         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
7320                         __alignof__(struct vcpu_svm), THIS_MODULE);
7321 }
7322
7323 static void __exit svm_exit(void)
7324 {
7325         kvm_exit();
7326 }
7327
7328 module_init(svm_init)
7329 module_exit(svm_exit)