/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_SVM),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL       0xc001011b

#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN           0x0000000000000001ULL
#define TSC_RATIO_MAX           0x000000ffffffffffULL
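/*
 * For reference (editorial note, not in the original source): the TSC ratio
 * MSR holds an 8.32 fixed-point value, with the fractional part in bits 31:0
 * and the 8-bit integer part in bits 39:32; bits 63:40 are reserved, which is
 * what TSC_RATIO_RSVD above encodes. TSC_RATIO_DEFAULT (0x0100000000) is
 * therefore a ratio of exactly 1.0, and TSC_RATIO_MAX is just under 256.0.
 */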

#define AVIC_HPA_MASK   ~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT      255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK          1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK         0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK         0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS               8
#define AVIC_VCPU_ID_MASK               ((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS                 24
#define AVIC_VM_ID_NR                   (1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK                 ((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)                (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
                                                (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)           ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)         (x & AVIC_VCPU_ID_MASK)
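/*
 * Worked example (illustrative): AVIC_GATAG(0x123456, 0x78) packs the 24-bit
 * VM ID 0x123456 into bits 31:8 and the 8-bit vCPU ID 0x78 into bits 7:0,
 * yielding the tag 0x12345678; AVIC_GATAG_TO_VMID() and
 * AVIC_GATAG_TO_VCPUID() recover the two fields from such a tag.
 */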

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        u32 ldr_mode;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;
        u64 vmcb_iopm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u32 intercept_cr;
        u32 intercept_dr;
        u32 intercept_exceptions;
        u64 intercept;

        /* Nested Paging related state */
        u64 nested_cr3;
};

#define MSRPM_OFFSETS   16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides are
 * published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        struct {
                u16 fs;
                u16 gs;
                u16 ldt;
                u64 gs_base;
        } host;

        u64 spec_ctrl;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled      : 1;

        u32 ldr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;
        bool avic_is_running;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vcpu affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* which host CPU was used for running this vcpu */
        unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
        struct list_head node;  /* Used by SVM for per-vcpu ir_list */
        void *data;             /* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK    (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK                (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK    (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK        (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK          (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK               (1ULL << 63)
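/*
 * Layout of a physical APIC ID table entry, as implied by the masks above:
 * bits 7:0 hold the host physical APIC ID, bits 51:12 hold the backing page
 * address, bit 62 is the is-running flag and bit 63 the valid flag.
 */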

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT       0x0100000000ULL

#define MSR_INVALID                     0xffffffffU

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
        { .index = MSR_FS_BASE,                         .always = true  },
        { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
        { .index = MSR_LSTAR,                           .always = true  },
        { .index = MSR_CSTAR,                           .always = true  },
        { .index = MSR_SYSCALL_MASK,                    .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
        { .index = MSR_IA32_LASTINTTOIP,                .always = false },
        { .index = MSR_INVALID,                         .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a pause instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *      upper bound on the amount of time a guest is allowed to execute in a
 *      pause loop. In this mode, a 16-bit pause filter threshold field is
 *      added to the VMCB. The threshold value is a cycle count that is used
 *      to reset the pause counter. As with simple pause filtering, VMRUN
 *      loads the pause count value from the VMCB into an internal counter.
 *      Then, on each pause instruction the hardware checks the elapsed number
 *      of cycles since the most recent pause instruction against the pause
 *      filter threshold. If the elapsed cycle count is greater than the pause
 *      filter threshold, then the internal pause count is reloaded from the
 *      VMCB and execution continues. If the elapsed cycle count is less than
 *      the pause filter threshold, then the internal pause count is
 *      decremented. If the count value is less than zero and the PAUSE
 *      intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *      filtering is supported and the pause filter threshold field is set to
 *      zero, the filter will operate in the simpler, count-only mode.
 */
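/*
 * Worked example with illustrative values: with pause_filter_count = 3000
 * and pause_filter_thresh = 128, a guest may execute up to 3000 PAUSEs that
 * each follow the previous one within 128 cycles; a PAUSE arriving more than
 * 128 cycles after the last one reloads the counter to 3000, and only when
 * the counter underflows does the PAUSE intercept fire a #VMEXIT.
 */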

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK         0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};


static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool svm_sev_enabled(void)
{
        return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static inline int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

static inline void mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}
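/*
 * With the twelve VMCB_* dirty bits defined above, mark_all_clean() computes
 * ((1 << 12) - 1) & ~((1 << VMCB_INTR) | (1 << VMCB_CR2)) = 0xfff & ~0x208
 * = 0xdf7: every clean bit set except INTR and CR2, which are rewritten
 * before every VMRUN and therefore must always stay dirty.
 */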

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
        svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
        mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 *entry = svm->avic_physical_id_cache;

        if (!entry)
                return false;

        return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

static void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct nested_state *g;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested;

        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
        if (is_guest_mode(&svm->vcpu))
                return svm->nested.hsave;
        else
                return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_cr |= (1U << bit);

        recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_cr &= ~(1U << bit);

        recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
                | (1 << INTERCEPT_DR1_READ)
                | (1 << INTERCEPT_DR2_READ)
                | (1 << INTERCEPT_DR3_READ)
                | (1 << INTERCEPT_DR4_READ)
                | (1 << INTERCEPT_DR5_READ)
                | (1 << INTERCEPT_DR6_READ)
                | (1 << INTERCEPT_DR7_READ)
                | (1 << INTERCEPT_DR0_WRITE)
                | (1 << INTERCEPT_DR1_WRITE)
                | (1 << INTERCEPT_DR2_WRITE)
                | (1 << INTERCEPT_DR3_WRITE)
                | (1 << INTERCEPT_DR4_WRITE)
                | (1 << INTERCEPT_DR5_WRITE)
                | (1 << INTERCEPT_DR6_WRITE)
                | (1 << INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_dr = 0;

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_exceptions |= (1U << bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_exceptions &= ~(1U << bit);

        recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept |= (1ULL << bit);

        recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept &= ~(1ULL << bit);

        recalc_intercepts(svm);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
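/* Each 2K byte range covers 2048 * 8 / 2 = 8192 MSRs, at two bits per MSR. */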

static u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
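/*
 * Worked example (illustrative): for MSR_EFER (0xc0000080) the loop matches
 * range 1, giving byte offset (0x80 / 4) + 2048 = 2080, i.e. u32 offset 520
 * into the MSR permission map.
 */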

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
        asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
        asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        vcpu->arch.efer = efer;
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
                       __func__, kvm_rip_read(vcpu), svm->next_rip);

        kvm_rip_write(vcpu, svm->next_rip);
        svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (!reinject &&
            nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }


        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int r;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        r = -ENOMEM;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto err_1;

        if (svm_sev_enabled()) {
                r = -ENOMEM;
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto err_1;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

err_1:
        kfree(sd);
        return r;

}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write,  &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers, extend the direct_access_msrs list at
         * the beginning of the file.
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}
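/*
 * Each MSR occupies two adjacent bits in the permission map: an even
 * read-intercept bit followed by an odd write-intercept bit, so one u32
 * covers 16 MSRs. A set bit means the access is intercepted; clearing both
 * bits (read = write = 1 above) gives the guest direct access to the MSR.
 */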

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG() triggers, the msrpm_offsets table has overflowed.
         * Just increase MSRPM_OFFSETS in that case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

/* Note:
 * This hash table is used to map a VM ID to a struct kvm_svm when
 * handling an AMD IOMMU GALOG notification to schedule in a
 * particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS   8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify SVM to
 * schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
        unsigned long flags;
        struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
        u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

        pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
                if (kvm_svm->avic_vm_id != vm_id)
                        continue;
                vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
                break;
        }
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        /* Note:
         * At this point, the IOMMU should have already set the pending
         * bit in the vAPIC backing page. So, we just need to schedule
         * in the vcpu.
         */
        if (vcpu)
                kvm_vcpu_wake_up(vcpu);

        return 0;
}

static __init int sev_hardware_setup(void)
{
        struct sev_user_data_status *status;
        int rc;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = cpuid_ecx(0x8000001F);

        if (!max_sev_asid)
                return 1;

        /* Minimum ASID value that should be used for SEV guest */
        min_sev_asid = cpuid_edx(0x8000001F);

        /* Initialize SEV ASID bitmap */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;

        status = kmalloc(sizeof(*status), GFP_KERNEL);
        if (!status)
                return 1;

        /*
         * Check SEV platform status.
         *
         * PLATFORM_STATUS can be called in any state. If we fail to query
         * the platform status, then either the PSP firmware does not
         * support the SEV feature or the SEV firmware is dead.
         */
1245         rc = sev_platform_status(status, NULL);
1246         if (rc)
1247                 goto err;
1248
1249         pr_info("SEV supported\n");
1250
1251 err:
1252         kfree(status);
1253         return rc;
1254 }
1255
1256 static void grow_ple_window(struct kvm_vcpu *vcpu)
1257 {
1258         struct vcpu_svm *svm = to_svm(vcpu);
1259         struct vmcb_control_area *control = &svm->vmcb->control;
1260         int old = control->pause_filter_count;
1261
1262         control->pause_filter_count = __grow_ple_window(old,
1263                                                         pause_filter_count,
1264                                                         pause_filter_count_grow,
1265                                                         pause_filter_count_max);
1266
1267         if (control->pause_filter_count != old)
1268                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1269
1270         trace_kvm_ple_window_grow(vcpu->vcpu_id,
1271                                   control->pause_filter_count, old);
1272 }
1273
1274 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1275 {
1276         struct vcpu_svm *svm = to_svm(vcpu);
1277         struct vmcb_control_area *control = &svm->vmcb->control;
1278         int old = control->pause_filter_count;
1279
1280         control->pause_filter_count =
1281                                 __shrink_ple_window(old,
1282                                                     pause_filter_count,
1283                                                     pause_filter_count_shrink,
1284                                                     pause_filter_count);
1285         if (control->pause_filter_count != old)
1286                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1287
1288         trace_kvm_ple_window_shrink(vcpu->vcpu_id,
1289                                     control->pause_filter_count, old);
1290 }
1291
1292 static __init int svm_hardware_setup(void)
1293 {
1294         int cpu;
1295         struct page *iopm_pages;
1296         void *iopm_va;
1297         int r;
1298
1299         iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
1300
1301         if (!iopm_pages)
1302                 return -ENOMEM;
1303
1304         iopm_va = page_address(iopm_pages);
1305         memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
1306         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
1307
1308         init_msrpm_offsets();
1309
1310         if (boot_cpu_has(X86_FEATURE_NX))
1311                 kvm_enable_efer_bits(EFER_NX);
1312
1313         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
1314                 kvm_enable_efer_bits(EFER_FFXSR);
1315
1316         if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1317                 kvm_has_tsc_control = true;
1318                 kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
1319                 kvm_tsc_scaling_ratio_frac_bits = 32;
1320         }
1321
1322         /* Check for pause filtering support */
1323         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
1324                 pause_filter_count = 0;
1325                 pause_filter_thresh = 0;
1326         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
1327                 pause_filter_thresh = 0;
1328         }
1329
1330         if (nested) {
1331                 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
1332                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
1333         }
1334
1335         if (sev) {
1336                 if (boot_cpu_has(X86_FEATURE_SEV) &&
1337                     IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
1338                         r = sev_hardware_setup();
1339                         if (r)
1340                                 sev = false;
1341                 } else {
1342                         sev = false;
1343                 }
1344         }
1345
1346         for_each_possible_cpu(cpu) {
1347                 r = svm_cpu_init(cpu);
1348                 if (r)
1349                         goto err;
1350         }
1351
1352         if (!boot_cpu_has(X86_FEATURE_NPT))
1353                 npt_enabled = false;
1354
1355         if (npt_enabled && !npt) {
1356                 printk(KERN_INFO "kvm: Nested Paging disabled\n");
1357                 npt_enabled = false;
1358         }
1359
1360         if (npt_enabled) {
1361                 printk(KERN_INFO "kvm: Nested Paging enabled\n");
1362                 kvm_enable_tdp();
1363         } else
1364                 kvm_disable_tdp();
1365
1366         if (avic) {
1367                 if (!npt_enabled ||
1368                     !boot_cpu_has(X86_FEATURE_AVIC) ||
1369                     !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
1370                         avic = false;
1371                 } else {
1372                         pr_info("AVIC enabled\n");
1373
1374                         amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
1375                 }
1376         }
1377
1378         if (vls) {
1379                 if (!npt_enabled ||
1380                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
1381                     !IS_ENABLED(CONFIG_X86_64)) {
1382                         vls = false;
1383                 } else {
1384                         pr_info("Virtual VMLOAD VMSAVE supported\n");
1385                 }
1386         }
1387
1388         if (vgif) {
1389                 if (!boot_cpu_has(X86_FEATURE_VGIF))
1390                         vgif = false;
1391                 else
1392                         pr_info("Virtual GIF supported\n");
1393         }
1394
1395         return 0;
1396
1397 err:
1398         __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
1399         iopm_base = 0;
1400         return r;
1401 }
1402
1403 static __exit void svm_hardware_unsetup(void)
1404 {
1405         int cpu;
1406
1407         if (svm_sev_enabled())
1408                 bitmap_free(sev_asid_bitmap);
1409
1410         for_each_possible_cpu(cpu)
1411                 svm_cpu_uninit(cpu);
1412
1413         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
1414         iopm_base = 0;
1415 }
1416
1417 static void init_seg(struct vmcb_seg *seg)
1418 {
1419         seg->selector = 0;
1420         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1421                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1422         seg->limit = 0xffff;
1423         seg->base = 0;
1424 }
1425
1426 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1427 {
1428         seg->selector = 0;
1429         seg->attrib = SVM_SELECTOR_P_MASK | type;
1430         seg->limit = 0xffff;
1431         seg->base = 0;
1432 }
1433
1434 static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1435 {
1436         struct vcpu_svm *svm = to_svm(vcpu);
1437
1438         if (is_guest_mode(vcpu))
1439                 return svm->nested.hsave->control.tsc_offset;
1440
1441         return vcpu->arch.tsc_offset;
1442 }
1443
1444 static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1445 {
1446         struct vcpu_svm *svm = to_svm(vcpu);
1447         u64 g_tsc_offset = 0;
1448
1449         if (is_guest_mode(vcpu)) {
1450                 /* Write L1's TSC offset.  */
1451                 g_tsc_offset = svm->vmcb->control.tsc_offset -
1452                                svm->nested.hsave->control.tsc_offset;
1453                 svm->nested.hsave->control.tsc_offset = offset;
1454         }
1455
1456         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1457                                    svm->vmcb->control.tsc_offset - g_tsc_offset,
1458                                    offset);
1459
1460         svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1461
1462         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1463         return svm->vmcb->control.tsc_offset;
1464 }
1465
1466 static void avic_init_vmcb(struct vcpu_svm *svm)
1467 {
1468         struct vmcb *vmcb = svm->vmcb;
1469         struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
1470         phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
1471         phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
1472         phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
1473
1474         vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
1475         vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
1476         vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
1477         vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
1478         vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
1479 }
1480
1481 static void init_vmcb(struct vcpu_svm *svm)
1482 {
1483         struct vmcb_control_area *control = &svm->vmcb->control;
1484         struct vmcb_save_area *save = &svm->vmcb->save;
1485
1486         svm->vcpu.arch.hflags = 0;
1487
1488         set_cr_intercept(svm, INTERCEPT_CR0_READ);
1489         set_cr_intercept(svm, INTERCEPT_CR3_READ);
1490         set_cr_intercept(svm, INTERCEPT_CR4_READ);
1491         set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1492         set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1493         set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1494         if (!kvm_vcpu_apicv_active(&svm->vcpu))
1495                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
1496
1497         set_dr_intercepts(svm);
1498
1499         set_exception_intercept(svm, PF_VECTOR);
1500         set_exception_intercept(svm, UD_VECTOR);
1501         set_exception_intercept(svm, MC_VECTOR);
1502         set_exception_intercept(svm, AC_VECTOR);
1503         set_exception_intercept(svm, DB_VECTOR);
1504         /*
1505          * Guest access to VMware backdoor ports could legitimately
1506          * trigger #GP because of TSS I/O permission bitmap.
1507          * We intercept those #GP and allow access to them anyway
1508          * as VMware does.
1509          */
1510         if (enable_vmware_backdoor)
1511                 set_exception_intercept(svm, GP_VECTOR);
1512
1513         set_intercept(svm, INTERCEPT_INTR);
1514         set_intercept(svm, INTERCEPT_NMI);
1515         set_intercept(svm, INTERCEPT_SMI);
1516         set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1517         set_intercept(svm, INTERCEPT_RDPMC);
1518         set_intercept(svm, INTERCEPT_CPUID);
1519         set_intercept(svm, INTERCEPT_INVD);
1520         set_intercept(svm, INTERCEPT_INVLPG);
1521         set_intercept(svm, INTERCEPT_INVLPGA);
1522         set_intercept(svm, INTERCEPT_IOIO_PROT);
1523         set_intercept(svm, INTERCEPT_MSR_PROT);
1524         set_intercept(svm, INTERCEPT_TASK_SWITCH);
1525         set_intercept(svm, INTERCEPT_SHUTDOWN);
1526         set_intercept(svm, INTERCEPT_VMRUN);
1527         set_intercept(svm, INTERCEPT_VMMCALL);
1528         set_intercept(svm, INTERCEPT_VMLOAD);
1529         set_intercept(svm, INTERCEPT_VMSAVE);
1530         set_intercept(svm, INTERCEPT_STGI);
1531         set_intercept(svm, INTERCEPT_CLGI);
1532         set_intercept(svm, INTERCEPT_SKINIT);
1533         set_intercept(svm, INTERCEPT_WBINVD);
1534         set_intercept(svm, INTERCEPT_XSETBV);
1535         set_intercept(svm, INTERCEPT_RSM);
1536
1537         if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
1538                 set_intercept(svm, INTERCEPT_MONITOR);
1539                 set_intercept(svm, INTERCEPT_MWAIT);
1540         }
1541
1542         if (!kvm_hlt_in_guest(svm->vcpu.kvm))
1543                 set_intercept(svm, INTERCEPT_HLT);
1544
1545         control->iopm_base_pa = __sme_set(iopm_base);
1546         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1547         control->int_ctl = V_INTR_MASKING_MASK;
1548
1549         init_seg(&save->es);
1550         init_seg(&save->ss);
1551         init_seg(&save->ds);
1552         init_seg(&save->fs);
1553         init_seg(&save->gs);
1554
1555         save->cs.selector = 0xf000;
1556         save->cs.base = 0xffff0000;
1557         /* Executable/Readable Code Segment */
1558         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1559                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1560         save->cs.limit = 0xffff;
1561
1562         save->gdtr.limit = 0xffff;
1563         save->idtr.limit = 0xffff;
1564
1565         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1566         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1567
1568         svm_set_efer(&svm->vcpu, 0);
1569         save->dr6 = 0xffff0ff0;
1570         kvm_set_rflags(&svm->vcpu, 2);
1571         save->rip = 0x0000fff0;
1572         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1573
1574         /*
1575          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1576          * It also updates the guest-visible cr0 value.
1577          */
1578         svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1579         kvm_mmu_reset_context(&svm->vcpu);
1580
1581         save->cr4 = X86_CR4_PAE;
1582         /* rdx = ?? */
1583
1584         if (npt_enabled) {
1585                 /* Setup VMCB for Nested Paging */
1586                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1587                 clr_intercept(svm, INTERCEPT_INVLPG);
1588                 clr_exception_intercept(svm, PF_VECTOR);
1589                 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1590                 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1591                 save->g_pat = svm->vcpu.arch.pat;
1592                 save->cr3 = 0;
1593                 save->cr4 = 0;
1594         }
1595         svm->asid_generation = 0;
1596
1597         svm->nested.vmcb = 0;
1598         svm->vcpu.arch.hflags = 0;
1599
1600         if (pause_filter_count) {
1601                 control->pause_filter_count = pause_filter_count;
1602                 if (pause_filter_thresh)
1603                         control->pause_filter_thresh = pause_filter_thresh;
1604                 set_intercept(svm, INTERCEPT_PAUSE);
1605         } else {
1606                 clr_intercept(svm, INTERCEPT_PAUSE);
1607         }
1608
1609         if (kvm_vcpu_apicv_active(&svm->vcpu))
1610                 avic_init_vmcb(svm);
1611
1612         /*
1613          * If the hardware supports Virtual VMLOAD/VMSAVE then enable it
1614          * in the VMCB and clear the intercepts to avoid #VMEXITs.
1615          */
1616         if (vls) {
1617                 clr_intercept(svm, INTERCEPT_VMLOAD);
1618                 clr_intercept(svm, INTERCEPT_VMSAVE);
1619                 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1620         }
1621
1622         if (vgif) {
1623                 clr_intercept(svm, INTERCEPT_STGI);
1624                 clr_intercept(svm, INTERCEPT_CLGI);
1625                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1626         }
1627
1628         if (sev_guest(svm->vcpu.kvm)) {
1629                 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1630                 clr_exception_intercept(svm, UD_VECTOR);
1631         }
1632
1633         mark_all_dirty(svm->vmcb);
1634
1635         enable_gif(svm);
1636
1637 }
1638
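     /*
      * Return a pointer to the entry for @index in this VM's physical
      * APIC ID table, or NULL if the index is out of range.
      */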
1639 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1640                                        unsigned int index)
1641 {
1642         u64 *avic_physical_id_table;
1643         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
1644
1645         if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1646                 return NULL;
1647
1648         avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
1649
1650         return &avic_physical_id_table[index];
1651 }
1652
1653 /*
1654  * Note:
1655  * AVIC hardware walks the nested page table to check permissions,
1656  * but does not use the SPA address specified in the leaf page
1657  * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1658  * field of the VMCB. Therefore, we set up the
1659  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1660  */
1661 static int avic_init_access_page(struct kvm_vcpu *vcpu)
1662 {
1663         struct kvm *kvm = vcpu->kvm;
1664         int ret = 0;
1665
1666         mutex_lock(&kvm->slots_lock);
1667         if (kvm->arch.apic_access_page_done)
1668                 goto out;
1669
1670         ret = __x86_set_memory_region(kvm,
1671                                       APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1672                                       APIC_DEFAULT_PHYS_BASE,
1673                                       PAGE_SIZE);
1674         if (ret)
1675                 goto out;
1676
1677         kvm->arch.apic_access_page_done = true;
1678 out:
1679         mutex_unlock(&kvm->slots_lock);
1680         return ret;
1681 }
1682
1683 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1684 {
1685         int ret;
1686         u64 *entry, new_entry;
1687         int id = vcpu->vcpu_id;
1688         struct vcpu_svm *svm = to_svm(vcpu);
1689
1690         ret = avic_init_access_page(vcpu);
1691         if (ret)
1692                 return ret;
1693
1694         if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1695                 return -EINVAL;
1696
1697         if (!svm->vcpu.arch.apic->regs)
1698                 return -EINVAL;
1699
1700         svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1701
1702         /* Set the AVIC backing page address in the physical APIC ID table */
1703         entry = avic_get_physical_id_entry(vcpu, id);
1704         if (!entry)
1705                 return -EINVAL;
1706
1707         new_entry = READ_ONCE(*entry);
1708         new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1709                               AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1710                               AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
1711         WRITE_ONCE(*entry, new_entry);
1712
1713         svm->avic_physical_id_cache = entry;
1714
1715         return 0;
1716 }
1717
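     /*
      * Free an SEV ASID: clear its bit in sev_asid_bitmap and invalidate
      * any per-CPU cached VMCB that was last run with this ASID.
      */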
1718 static void __sev_asid_free(int asid)
1719 {
1720         struct svm_cpu_data *sd;
1721         int cpu, pos;
1722
1723         pos = asid - 1;
1724         clear_bit(pos, sev_asid_bitmap);
1725
1726         for_each_possible_cpu(cpu) {
1727                 sd = per_cpu(svm_data, cpu);
1728                 sd->sev_vmcbs[pos] = NULL;
1729         }
1730 }
1731
1732 static void sev_asid_free(struct kvm *kvm)
1733 {
1734         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1735
1736         __sev_asid_free(sev->asid);
1737 }
1738
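     /*
      * Deactivate and then decommission the SEV firmware handle; flush
      * caches and the data fabric in between so that no stale encrypted
      * data survives the teardown.
      */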
1739 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1740 {
1741         struct sev_data_decommission *decommission;
1742         struct sev_data_deactivate *data;
1743
1744         if (!handle)
1745                 return;
1746
1747         data = kzalloc(sizeof(*data), GFP_KERNEL);
1748         if (!data)
1749                 return;
1750
1751         /* deactivate handle */
1752         data->handle = handle;
1753         sev_guest_deactivate(data, NULL);
1754
1755         wbinvd_on_all_cpus();
1756         sev_guest_df_flush(NULL);
1757         kfree(data);
1758
1759         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1760         if (!decommission)
1761                 return;
1762
1763         /* decommission handle */
1764         decommission->handle = handle;
1765         sev_guest_decommission(decommission, NULL);
1766
1767         kfree(decommission);
1768 }
1769
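     /*
      * Pin the user pages backing [uaddr, uaddr + ulen), accounting them
      * against RLIMIT_MEMLOCK. Returns the pinned page array and stores
      * the page count in @n, or returns NULL on failure.
      */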
1770 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1771                                     unsigned long ulen, unsigned long *n,
1772                                     int write)
1773 {
1774         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1775         unsigned long npages, npinned, size;
1776         unsigned long locked, lock_limit;
1777         struct page **pages;
1778         unsigned long first, last;
1779
1780         if (ulen == 0 || uaddr + ulen < uaddr)
1781                 return NULL;
1782
1783         /* Calculate number of pages. */
1784         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1785         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1786         npages = (last - first + 1);
1787
1788         locked = sev->pages_locked + npages;
1789         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1790         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1791                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1792                 return NULL;
1793         }
1794
1795         /* Avoid using vmalloc for smaller buffers. */
1796         size = npages * sizeof(struct page *);
1797         if (size > PAGE_SIZE)
1798                 pages = vmalloc(size);
1799         else
1800                 pages = kmalloc(size, GFP_KERNEL);
1801
1802         if (!pages)
1803                 return NULL;
1804
1805         /* Pin the user virtual address. */
1806         npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
1807         if (npinned != npages) {
1808                 pr_err("SEV: Failure locking %lu pages.\n", npages);
1809                 goto err;
1810         }
1811
1812         *n = npages;
1813         sev->pages_locked = locked;
1814
1815         return pages;
1816
1817 err:
1818         if (npinned > 0)
1819                 release_pages(pages, npinned);
1820
1821         kvfree(pages);
1822         return NULL;
1823 }
1824
1825 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1826                              unsigned long npages)
1827 {
1828         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1829
1830         release_pages(pages, npages);
1831         kvfree(pages);
1832         sev->pages_locked -= npages;
1833 }
1834
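     /*
      * Flush the cache lines of each given page so that data written
      * under one C-bit setting is not read back stale under the other.
      */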
1835 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1836 {
1837         uint8_t *page_virtual;
1838         unsigned long i;
1839
1840         if (npages == 0 || pages == NULL)
1841                 return;
1842
1843         for (i = 0; i < npages; i++) {
1844                 page_virtual = kmap_atomic(pages[i]);
1845                 clflush_cache_range(page_virtual, PAGE_SIZE);
1846                 kunmap_atomic(page_virtual);
1847         }
1848 }
1849
1850 static void __unregister_enc_region_locked(struct kvm *kvm,
1851                                            struct enc_region *region)
1852 {
1853         /*
1854          * The guest may change the memory encryption attribute from C=0 -> C=1
1855          * or vice versa for this memory range. Let's make sure caches are
1856          * flushed to ensure that guest data gets written into memory with
1857          * the correct C-bit.
1858          */
1859         sev_clflush_pages(region->pages, region->npages);
1860
1861         sev_unpin_memory(kvm, region->pages, region->npages);
1862         list_del(&region->list);
1863         kfree(region);
1864 }
1865
1866 static struct kvm *svm_vm_alloc(void)
1867 {
1868         struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
1869         return &kvm_svm->kvm;
1870 }
1871
1872 static void svm_vm_free(struct kvm *kvm)
1873 {
1874         vfree(to_kvm_svm(kvm));
1875 }
1876
1877 static void sev_vm_destroy(struct kvm *kvm)
1878 {
1879         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1880         struct list_head *head = &sev->regions_list;
1881         struct list_head *pos, *q;
1882
1883         if (!sev_guest(kvm))
1884                 return;
1885
1886         mutex_lock(&kvm->lock);
1887
1888         /*
1889          * If userspace was terminated before unregistering the memory
1890          * regions, unpin all of the registered memory.
1891          */
1892         if (!list_empty(head)) {
1893                 list_for_each_safe(pos, q, head) {
1894                         __unregister_enc_region_locked(kvm,
1895                                 list_entry(pos, struct enc_region, list));
1896                 }
1897         }
1898
1899         mutex_unlock(&kvm->lock);
1900
1901         sev_unbind_asid(kvm, sev->handle);
1902         sev_asid_free(kvm);
1903 }
1904
1905 static void avic_vm_destroy(struct kvm *kvm)
1906 {
1907         unsigned long flags;
1908         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1909
1910         if (!avic)
1911                 return;
1912
1913         if (kvm_svm->avic_logical_id_table_page)
1914                 __free_page(kvm_svm->avic_logical_id_table_page);
1915         if (kvm_svm->avic_physical_id_table_page)
1916                 __free_page(kvm_svm->avic_physical_id_table_page);
1917
1918         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1919         hash_del(&kvm_svm->hnode);
1920         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1921 }
1922
1923 static void svm_vm_destroy(struct kvm *kvm)
1924 {
1925         avic_vm_destroy(kvm);
1926         sev_vm_destroy(kvm);
1927 }
1928
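     /*
      * Allocate the per-VM physical and logical APIC ID tables and assign
      * a unique, non-zero AVIC VM ID, registering the VM in
      * svm_vm_data_hash under that ID.
      */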
1929 static int avic_vm_init(struct kvm *kvm)
1930 {
1931         unsigned long flags;
1932         int err = -ENOMEM;
1933         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1934         struct kvm_svm *k2;
1935         struct page *p_page;
1936         struct page *l_page;
1937         u32 vm_id;
1938
1939         if (!avic)
1940                 return 0;
1941
1942         /* Allocating physical APIC ID table (4KB) */
1943         p_page = alloc_page(GFP_KERNEL);
1944         if (!p_page)
1945                 goto free_avic;
1946
1947         kvm_svm->avic_physical_id_table_page = p_page;
1948         clear_page(page_address(p_page));
1949
1950         /* Allocating logical APIC ID table (4KB) */
1951         l_page = alloc_page(GFP_KERNEL);
1952         if (!l_page)
1953                 goto free_avic;
1954
1955         kvm_svm->avic_logical_id_table_page = l_page;
1956         clear_page(page_address(l_page));
1957
1958         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1959  again:
1960         vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1961         if (vm_id == 0) { /* id is 1-based, zero is not okay */
1962                 next_vm_id_wrapped = 1;
1963                 goto again;
1964         }
1965         /* Is it still in use? Only possible if wrapped at least once */
1966         if (next_vm_id_wrapped) {
1967                 hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
1968                         if (k2->avic_vm_id == vm_id)
1969                                 goto again;
1970                 }
1971         }
1972         kvm_svm->avic_vm_id = vm_id;
1973         hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
1974         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1975
1976         return 0;
1977
1978 free_avic:
1979         avic_vm_destroy(kvm);
1980         return err;
1981 }
1982
1983 static inline int
1984 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1985 {
1986         int ret = 0;
1987         unsigned long flags;
1988         struct amd_svm_iommu_ir *ir;
1989         struct vcpu_svm *svm = to_svm(vcpu);
1990
1991         if (!kvm_arch_has_assigned_device(vcpu->kvm))
1992                 return 0;
1993
1994         /*
1995          * Here, we go through the per-vcpu ir_list to update all existing
1996          * interrupt remapping table entries targeting this vcpu.
1997          */
1998         spin_lock_irqsave(&svm->ir_list_lock, flags);
1999
2000         if (list_empty(&svm->ir_list))
2001                 goto out;
2002
2003         list_for_each_entry(ir, &svm->ir_list, node) {
2004                 ret = amd_iommu_update_ga(cpu, r, ir->data);
2005                 if (ret)
2006                         break;
2007         }
2008 out:
2009         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
2010         return ret;
2011 }
2012
2013 static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2014 {
2015         u64 entry;
2016         /* ID = 0xff (broadcast), ID > 0xff (reserved) */
2017         int h_physical_id = kvm_cpu_get_apicid(cpu);
2018         struct vcpu_svm *svm = to_svm(vcpu);
2019
2020         if (!kvm_vcpu_apicv_active(vcpu))
2021                 return;
2022
2023         if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
2024                 return;
2025
2026         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2027         WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
2028
2029         entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
2030         entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
2031
2032         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2033         if (svm->avic_is_running)
2034                 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2035
2036         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2037         avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
2038                                         svm->avic_is_running);
2039 }
2040
2041 static void avic_vcpu_put(struct kvm_vcpu *vcpu)
2042 {
2043         u64 entry;
2044         struct vcpu_svm *svm = to_svm(vcpu);
2045
2046         if (!kvm_vcpu_apicv_active(vcpu))
2047                 return;
2048
2049         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2050         if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
2051                 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
2052
2053         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2054         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2055 }
2056
2057 /*
2058  * This function is called during VCPU halt/unhalt.
2059  */
2060 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
2061 {
2062         struct vcpu_svm *svm = to_svm(vcpu);
2063
2064         svm->avic_is_running = is_run;
2065         if (is_run)
2066                 avic_vcpu_load(vcpu, vcpu->cpu);
2067         else
2068                 avic_vcpu_put(vcpu);
2069 }
2070
2071 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2072 {
2073         struct vcpu_svm *svm = to_svm(vcpu);
2074         u32 dummy;
2075         u32 eax = 1;
2076
2077         vcpu->arch.microcode_version = 0x01000065;
2078         svm->spec_ctrl = 0;
2079         svm->virt_spec_ctrl = 0;
2080
2081         if (!init_event) {
2082                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
2083                                            MSR_IA32_APICBASE_ENABLE;
2084                 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
2085                         svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
2086         }
2087         init_vmcb(svm);
2088
2089         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
2090         kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
2091
2092         if (kvm_vcpu_apicv_active(vcpu) && !init_event)
2093                 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
2094 }
2095
2096 static int avic_init_vcpu(struct vcpu_svm *svm)
2097 {
2098         int ret;
2099
2100         if (!kvm_vcpu_apicv_active(&svm->vcpu))
2101                 return 0;
2102
2103         ret = avic_init_backing_page(&svm->vcpu);
2104         if (ret)
2105                 return ret;
2106
2107         INIT_LIST_HEAD(&svm->ir_list);
2108         spin_lock_init(&svm->ir_list_lock);
2109
2110         return ret;
2111 }
2112
2113 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
2114 {
2115         struct vcpu_svm *svm;
2116         struct page *page;
2117         struct page *msrpm_pages;
2118         struct page *hsave_page;
2119         struct page *nested_msrpm_pages;
2120         int err;
2121
2122         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
2123         if (!svm) {
2124                 err = -ENOMEM;
2125                 goto out;
2126         }
2127
2128         svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
2129         if (!svm->vcpu.arch.guest_fpu) {
2130                 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
2131                 err = -ENOMEM;
2132                 goto free_partial_svm;
2133         }
2134
2135         err = kvm_vcpu_init(&svm->vcpu, kvm, id);
2136         if (err)
2137                 goto free_svm;
2138
2139         err = -ENOMEM;
2140         page = alloc_page(GFP_KERNEL);
2141         if (!page)
2142                 goto uninit;
2143
2144         msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
2145         if (!msrpm_pages)
2146                 goto free_page1;
2147
2148         nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
2149         if (!nested_msrpm_pages)
2150                 goto free_page2;
2151
2152         hsave_page = alloc_page(GFP_KERNEL);
2153         if (!hsave_page)
2154                 goto free_page3;
2155
2156         err = avic_init_vcpu(svm);
2157         if (err)
2158                 goto free_page4;
2159
2160         /* Initialize this flag to true so that the is_running bit is
2161          * set the first time the vcpu is loaded.
2162          */
2163         svm->avic_is_running = true;
2164
2165         svm->nested.hsave = page_address(hsave_page);
2166
2167         svm->msrpm = page_address(msrpm_pages);
2168         svm_vcpu_init_msrpm(svm->msrpm);
2169
2170         svm->nested.msrpm = page_address(nested_msrpm_pages);
2171         svm_vcpu_init_msrpm(svm->nested.msrpm);
2172
2173         svm->vmcb = page_address(page);
2174         clear_page(svm->vmcb);
2175         svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
2176         svm->asid_generation = 0;
2177         init_vmcb(svm);
2178
2179         svm_init_osvw(&svm->vcpu);
2180
2181         return &svm->vcpu;
2182
2183 free_page4:
2184         __free_page(hsave_page);
2185 free_page3:
2186         __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
2187 free_page2:
2188         __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
2189 free_page1:
2190         __free_page(page);
2191 uninit:
2192         kvm_vcpu_uninit(&svm->vcpu);
2193 free_svm:
2194         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2195 free_partial_svm:
2196         kmem_cache_free(kvm_vcpu_cache, svm);
2197 out:
2198         return ERR_PTR(err);
2199 }
2200
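     /*
      * Drop any per-CPU current_vmcb pointer that still refers to @vmcb,
      * so a later reuse of the page is not mistaken for the VMCB that is
      * already loaded on that CPU.
      */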
2201 static void svm_clear_current_vmcb(struct vmcb *vmcb)
2202 {
2203         int i;
2204
2205         for_each_online_cpu(i)
2206                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2207 }
2208
2209 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2210 {
2211         struct vcpu_svm *svm = to_svm(vcpu);
2212
2213         /*
2214          * The vmcb page can be recycled, causing a false negative in
2215          * svm_vcpu_load(). So, ensure that no logical CPU has this
2216          * vmcb page recorded as its current vmcb.
2217          */
2218         svm_clear_current_vmcb(svm->vmcb);
2219
2220         __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2221         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2222         __free_page(virt_to_page(svm->nested.hsave));
2223         __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2224         kvm_vcpu_uninit(vcpu);
2225         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2226         kmem_cache_free(kvm_vcpu_cache, svm);
2227 }
2228
2229 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2230 {
2231         struct vcpu_svm *svm = to_svm(vcpu);
2232         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2233         int i;
2234
2235         if (unlikely(cpu != vcpu->cpu)) {
2236                 svm->asid_generation = 0;
2237                 mark_all_dirty(svm->vmcb);
2238         }
2239
2240 #ifdef CONFIG_X86_64
2241         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2242 #endif
2243         savesegment(fs, svm->host.fs);
2244         savesegment(gs, svm->host.gs);
2245         svm->host.ldt = kvm_read_ldt();
2246
2247         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2248                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2249
2250         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2251                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2252                 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2253                         __this_cpu_write(current_tsc_ratio, tsc_ratio);
2254                         wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2255                 }
2256         }
2257         /* This assumes that the kernel never uses MSR_TSC_AUX */
2258         if (static_cpu_has(X86_FEATURE_RDTSCP))
2259                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2260
2261         if (sd->current_vmcb != svm->vmcb) {
2262                 sd->current_vmcb = svm->vmcb;
2263                 indirect_branch_prediction_barrier();
2264         }
2265         avic_vcpu_load(vcpu, cpu);
2266 }
2267
2268 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2269 {
2270         struct vcpu_svm *svm = to_svm(vcpu);
2271         int i;
2272
2273         avic_vcpu_put(vcpu);
2274
2275         ++vcpu->stat.host_state_reload;
2276         kvm_load_ldt(svm->host.ldt);
2277 #ifdef CONFIG_X86_64
2278         loadsegment(fs, svm->host.fs);
2279         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
2280         load_gs_index(svm->host.gs);
2281 #else
2282 #ifdef CONFIG_X86_32_LAZY_GS
2283         loadsegment(gs, svm->host.gs);
2284 #endif
2285 #endif
2286         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2287                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2288 }
2289
2290 static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2291 {
2292         avic_set_running(vcpu, false);
2293 }
2294
2295 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2296 {
2297         avic_set_running(vcpu, true);
2298 }
2299
2300 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2301 {
2302         struct vcpu_svm *svm = to_svm(vcpu);
2303         unsigned long rflags = svm->vmcb->save.rflags;
2304
2305         if (svm->nmi_singlestep) {
2306                 /* Hide our flags if they were not set by the guest */
2307                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2308                         rflags &= ~X86_EFLAGS_TF;
2309                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2310                         rflags &= ~X86_EFLAGS_RF;
2311         }
2312         return rflags;
2313 }
2314
2315 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2316 {
2317         if (to_svm(vcpu)->nmi_singlestep)
2318                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2319
2320         /*
2321          * Any change of EFLAGS.VM is accompanied by a reload of SS
2322          * (caused by either a task switch or an inter-privilege IRET),
2323          * so we do not need to update the CPL here.
2324          */
2325         to_svm(vcpu)->vmcb->save.rflags = rflags;
2326 }
2327
2328 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2329 {
2330         switch (reg) {
2331         case VCPU_EXREG_PDPTR:
2332                 BUG_ON(!npt_enabled);
2333                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
2334                 break;
2335         default:
2336                 BUG();
2337         }
2338 }
2339
2340 static void svm_set_vintr(struct vcpu_svm *svm)
2341 {
2342         set_intercept(svm, INTERCEPT_VINTR);
2343 }
2344
2345 static void svm_clear_vintr(struct vcpu_svm *svm)
2346 {
2347         clr_intercept(svm, INTERCEPT_VINTR);
2348 }
2349
2350 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2351 {
2352         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2353
2354         switch (seg) {
2355         case VCPU_SREG_CS: return &save->cs;
2356         case VCPU_SREG_DS: return &save->ds;
2357         case VCPU_SREG_ES: return &save->es;
2358         case VCPU_SREG_FS: return &save->fs;
2359         case VCPU_SREG_GS: return &save->gs;
2360         case VCPU_SREG_SS: return &save->ss;
2361         case VCPU_SREG_TR: return &save->tr;
2362         case VCPU_SREG_LDTR: return &save->ldtr;
2363         }
2364         BUG();
2365         return NULL;
2366 }
2367
2368 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2369 {
2370         struct vmcb_seg *s = svm_seg(vcpu, seg);
2371
2372         return s->base;
2373 }
2374
2375 static void svm_get_segment(struct kvm_vcpu *vcpu,
2376                             struct kvm_segment *var, int seg)
2377 {
2378         struct vmcb_seg *s = svm_seg(vcpu, seg);
2379
2380         var->base = s->base;
2381         var->limit = s->limit;
2382         var->selector = s->selector;
2383         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2384         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2385         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2386         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2387         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2388         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2389         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
2390
2391         /*
2392          * AMD CPUs circa 2014 track the G bit for all segments except CS.
2393          * However, the SVM spec states that the G bit is not observed by the
2394          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2395          * So let's synthesize a legal G bit for all segments, this helps
2396          * running KVM nested. It also helps cross-vendor migration, because
2397          * Intel's vmentry has a check on the 'G' bit.
2398          */
2399         var->g = s->limit > 0xfffff;
2400
2401         /*
2402          * AMD's VMCB does not have an explicit unusable field, so for
2403          * cross-vendor migration treat a segment that is not present as unusable.
2404          */
2405         var->unusable = !var->present;
2406
2407         switch (seg) {
2408         case VCPU_SREG_TR:
2409                 /*
2410                  * Work around a bug where the busy flag in the tr selector
2411                  * isn't exposed
2412                  */
2413                 var->type |= 0x2;
2414                 break;
2415         case VCPU_SREG_DS:
2416         case VCPU_SREG_ES:
2417         case VCPU_SREG_FS:
2418         case VCPU_SREG_GS:
2419                 /*
2420                  * The accessed bit must always be set in the segment
2421                  * descriptor cache; although it can be cleared in the
2422                  * descriptor itself, the cached bit always remains 1. Since
2423                  * Intel has a check on this, set it here to support
2424                  * cross-vendor migration.
2425                  */
2426                 if (!var->unusable)
2427                         var->type |= 0x1;
2428                 break;
2429         case VCPU_SREG_SS:
2430                 /*
2431                  * On AMD CPUs sometimes the DB bit in the segment
2432                  * descriptor is left as 1, although the whole segment has
2433                  * been made unusable. Clear it here to pass an Intel VMX
2434                  * entry check when cross vendor migrating.
2435                  */
2436                 if (var->unusable)
2437                         var->db = 0;
2438                 /* This is symmetric with svm_set_segment() */
2439                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
2440                 break;
2441         }
2442 }
2443
2444 static int svm_get_cpl(struct kvm_vcpu *vcpu)
2445 {
2446         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2447
2448         return save->cpl;
2449 }
2450
2451 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2452 {
2453         struct vcpu_svm *svm = to_svm(vcpu);
2454
2455         dt->size = svm->vmcb->save.idtr.limit;
2456         dt->address = svm->vmcb->save.idtr.base;
2457 }
2458
2459 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2460 {
2461         struct vcpu_svm *svm = to_svm(vcpu);
2462
2463         svm->vmcb->save.idtr.limit = dt->size;
2464         svm->vmcb->save.idtr.base = dt->address;
2465         mark_dirty(svm->vmcb, VMCB_DT);
2466 }
2467
2468 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2469 {
2470         struct vcpu_svm *svm = to_svm(vcpu);
2471
2472         dt->size = svm->vmcb->save.gdtr.limit;
2473         dt->address = svm->vmcb->save.gdtr.base;
2474 }
2475
2476 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2477 {
2478         struct vcpu_svm *svm = to_svm(vcpu);
2479
2480         svm->vmcb->save.gdtr.limit = dt->size;
2481         svm->vmcb->save.gdtr.base = dt->address;
2482         mark_dirty(svm->vmcb, VMCB_DT);
2483 }
2484
2485 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2486 {
2487 }
2488
2489 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2490 {
2491 }
2492
2493 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
2494 {
2495 }
2496
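     /*
      * Intercept CR0 accesses only while the guest and host views of the
      * selectively-virtualized CR0 bits differ.
      */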
2497 static void update_cr0_intercept(struct vcpu_svm *svm)
2498 {
2499         ulong gcr0 = svm->vcpu.arch.cr0;
2500         u64 *hcr0 = &svm->vmcb->save.cr0;
2501
2502         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2503                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
2504
2505         mark_dirty(svm->vmcb, VMCB_CR);
2506
2507         if (gcr0 == *hcr0) {
2508                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2509                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2510         } else {
2511                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2512                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2513         }
2514 }
2515
2516 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2517 {
2518         struct vcpu_svm *svm = to_svm(vcpu);
2519
2520 #ifdef CONFIG_X86_64
2521         if (vcpu->arch.efer & EFER_LME) {
2522                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
2523                         vcpu->arch.efer |= EFER_LMA;
2524                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
2525                 }
2526
2527                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
2528                         vcpu->arch.efer &= ~EFER_LMA;
2529                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
2530                 }
2531         }
2532 #endif
2533         vcpu->arch.cr0 = cr0;
2534
2535         if (!npt_enabled)
2536                 cr0 |= X86_CR0_PG | X86_CR0_WP;
2537
2538         /*
2539          * Re-enable caching here because the QEMU BIOS
2540          * does not do it - this results in some delay at
2541          * reboot.
2542          */
2543         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2544                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
2545         svm->vmcb->save.cr0 = cr0;
2546         mark_dirty(svm->vmcb, VMCB_CR);
2547         update_cr0_intercept(svm);
2548 }
2549
2550 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2551 {
2552         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
2553         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2554
2555         if (cr4 & X86_CR4_VMXE)
2556                 return 1;
2557
2558         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
2559                 svm_flush_tlb(vcpu, true);
2560
2561         vcpu->arch.cr4 = cr4;
2562         if (!npt_enabled)
2563                 cr4 |= X86_CR4_PAE;
2564         cr4 |= host_cr4_mce;
2565         to_svm(vcpu)->vmcb->save.cr4 = cr4;
2566         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
2567         return 0;
2568 }
2569
2570 static void svm_set_segment(struct kvm_vcpu *vcpu,
2571                             struct kvm_segment *var, int seg)
2572 {
2573         struct vcpu_svm *svm = to_svm(vcpu);
2574         struct vmcb_seg *s = svm_seg(vcpu, seg);
2575
2576         s->base = var->base;
2577         s->limit = var->limit;
2578         s->selector = var->selector;
2579         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2580         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2581         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2582         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2583         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2584         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2585         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2586         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
2587
2588         /*
2589          * This is always accurate, except if SYSRET returned to a segment
2590          * with SS.DPL != 3.  Intel does not have this quirk, and always
2591          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2592          * would entail passing the CPL to userspace and back.
2593          */
2594         if (seg == VCPU_SREG_SS)
2595                 /* This is symmetric with svm_get_segment() */
2596                 svm->vmcb->save.cpl = (var->dpl & 3);
2597
2598         mark_dirty(svm->vmcb, VMCB_SEG);
2599 }
2600
2601 static void update_bp_intercept(struct kvm_vcpu *vcpu)
2602 {
2603         struct vcpu_svm *svm = to_svm(vcpu);
2604
2605         clr_exception_intercept(svm, BP_VECTOR);
2606
2607         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
2608                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2609                         set_exception_intercept(svm, BP_VECTOR);
2610         } else
2611                 vcpu->guest_debug = 0;
2612 }
2613
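     /*
      * Assign the vCPU a fresh ASID; once the ASID space of this CPU is
      * exhausted, bump the generation, restart from min_asid and request
      * a full TLB flush.
      */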
2614 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
2615 {
2616         if (sd->next_asid > sd->max_asid) {
2617                 ++sd->asid_generation;
2618                 sd->next_asid = sd->min_asid;
2619                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2620         }
2621
2622         svm->asid_generation = sd->asid_generation;
2623         svm->vmcb->control.asid = sd->next_asid++;
2624
2625         mark_dirty(svm->vmcb, VMCB_ASID);
2626 }
2627
2628 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2629 {
2630         return to_svm(vcpu)->vmcb->save.dr6;
2631 }
2632
2633 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2634 {
2635         struct vcpu_svm *svm = to_svm(vcpu);
2636
2637         svm->vmcb->save.dr6 = value;
2638         mark_dirty(svm->vmcb, VMCB_DR);
2639 }
2640
2641 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2642 {
2643         struct vcpu_svm *svm = to_svm(vcpu);
2644
2645         get_debugreg(vcpu->arch.db[0], 0);
2646         get_debugreg(vcpu->arch.db[1], 1);
2647         get_debugreg(vcpu->arch.db[2], 2);
2648         get_debugreg(vcpu->arch.db[3], 3);
2649         vcpu->arch.dr6 = svm_get_dr6(vcpu);
2650         vcpu->arch.dr7 = svm->vmcb->save.dr7;
2651
2652         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2653         set_dr_intercepts(svm);
2654 }
2655
2656 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2657 {
2658         struct vcpu_svm *svm = to_svm(vcpu);
2659
2660         svm->vmcb->save.dr7 = value;
2661         mark_dirty(svm->vmcb, VMCB_DR);
2662 }
2663
2664 static int pf_interception(struct vcpu_svm *svm)
2665 {
2666         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2667         u64 error_code = svm->vmcb->control.exit_info_1;
2668
2669         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2670                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2671                         svm->vmcb->control.insn_bytes : NULL,
2672                         svm->vmcb->control.insn_len);
2673 }
2674
2675 static int npf_interception(struct vcpu_svm *svm)
2676 {
2677         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2678         u64 error_code = svm->vmcb->control.exit_info_1;
2679
2680         trace_kvm_page_fault(fault_address, error_code);
2681         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2682                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2683                         svm->vmcb->control.insn_bytes : NULL,
2684                         svm->vmcb->control.insn_len);
2685 }
2686
2687 static int db_interception(struct vcpu_svm *svm)
2688 {
2689         struct kvm_run *kvm_run = svm->vcpu.run;
2690
2691         if (!(svm->vcpu.guest_debug &
2692               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2693               !svm->nmi_singlestep) {
2694                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2695                 return 1;
2696         }
2697
2698         if (svm->nmi_singlestep) {
2699                 disable_nmi_singlestep(svm);
2700         }
2701
2702         if (svm->vcpu.guest_debug &
2703             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2704                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2705                 kvm_run->debug.arch.pc =
2706                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2707                 kvm_run->debug.arch.exception = DB_VECTOR;
2708                 return 0;
2709         }
2710
2711         return 1;
2712 }
2713
2714 static int bp_interception(struct vcpu_svm *svm)
2715 {
2716         struct kvm_run *kvm_run = svm->vcpu.run;
2717
2718         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2719         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2720         kvm_run->debug.arch.exception = BP_VECTOR;
2721         return 0;
2722 }
2723
2724 static int ud_interception(struct vcpu_svm *svm)
2725 {
2726         return handle_ud(&svm->vcpu);
2727 }
2728
2729 static int ac_interception(struct vcpu_svm *svm)
2730 {
2731         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2732         return 1;
2733 }
2734
2735 static int gp_interception(struct vcpu_svm *svm)
2736 {
2737         struct kvm_vcpu *vcpu = &svm->vcpu;
2738         u32 error_code = svm->vmcb->control.exit_info_1;
2739         int er;
2740
2741         WARN_ON_ONCE(!enable_vmware_backdoor);
2742
2743         er = kvm_emulate_instruction(vcpu,
2744                 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
2745         if (er == EMULATE_USER_EXIT)
2746                 return 0;
2747         else if (er != EMULATE_DONE)
2748                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2749         return 1;
2750 }
2751
2752 static bool is_erratum_383(void)
2753 {
2754         int err, i;
2755         u64 value;
2756
2757         if (!erratum_383_found)
2758                 return false;
2759
2760         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2761         if (err)
2762                 return false;
2763
2764         /* Bit 62 may or may not be set for this mce */
2765         value &= ~(1ULL << 62);
2766
2767         if (value != 0xb600000000010015ULL)
2768                 return false;
2769
2770         /* Clear MCi_STATUS registers */
2771         for (i = 0; i < 6; ++i)
2772                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2773
2774         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2775         if (!err) {
2776                 u32 low, high;
2777
2778                 value &= ~(1ULL << 2);
2779                 low    = lower_32_bits(value);
2780                 high   = upper_32_bits(value);
2781
2782                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2783         }
2784
2785         /* Flush tlb to evict multi-match entries */
2786         __flush_tlb_all();
2787
2788         return true;
2789 }
2790
2791 static void svm_handle_mce(struct vcpu_svm *svm)
2792 {
2793         if (is_erratum_383()) {
2794                 /*
2795                  * Erratum 383 triggered. Guest state is corrupt so kill the
2796                  * guest.
2797                  */
2798                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2799
2800                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
2801
2802                 return;
2803         }
2804
2805         /*
2806          * On an #MC intercept the MCE handler is not called automatically in
2807          * the host. So do it by hand here.
2808          */
2809         asm volatile (
2810                 "int $0x12\n");
2811         /* not sure if we ever come back to this point */
2812
2813         return;
2814 }
2815
2816 static int mc_interception(struct vcpu_svm *svm)
2817 {
2818         return 1;
2819 }
2820
2821 static int shutdown_interception(struct vcpu_svm *svm)
2822 {
2823         struct kvm_run *kvm_run = svm->vcpu.run;
2824
2825         /*
2826          * VMCB is undefined after a SHUTDOWN intercept
2827          * so reinitialize it.
2828          */
2829         clear_page(svm->vmcb);
2830         init_vmcb(svm);
2831
2832         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2833         return 0;
2834 }
2835
2836 static int io_interception(struct vcpu_svm *svm)
2837 {
2838         struct kvm_vcpu *vcpu = &svm->vcpu;
2839         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2840         int size, in, string;
2841         unsigned port;
2842
2843         ++svm->vcpu.stat.io_exits;
2844         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2845         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2846         if (string)
2847                 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
2848
2849         port = io_info >> 16;
2850         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2851         svm->next_rip = svm->vmcb->control.exit_info_2;
2852
2853         return kvm_fast_pio(&svm->vcpu, size, port, in);
2854 }
2855
2856 static int nmi_interception(struct vcpu_svm *svm)
2857 {
2858         return 1;
2859 }
2860
2861 static int intr_interception(struct vcpu_svm *svm)
2862 {
2863         ++svm->vcpu.stat.irq_exits;
2864         return 1;
2865 }
2866
2867 static int nop_on_interception(struct vcpu_svm *svm)
2868 {
2869         return 1;
2870 }
2871
2872 static int halt_interception(struct vcpu_svm *svm)
2873 {
2874         svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
2875         return kvm_emulate_halt(&svm->vcpu);
2876 }
2877
2878 static int vmmcall_interception(struct vcpu_svm *svm)
2879 {
2880         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2881         return kvm_emulate_hypercall(&svm->vcpu);
2882 }
2883
2884 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2885 {
2886         struct vcpu_svm *svm = to_svm(vcpu);
2887
2888         return svm->nested.nested_cr3;
2889 }
2890
2891 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2892 {
2893         struct vcpu_svm *svm = to_svm(vcpu);
2894         u64 cr3 = svm->nested.nested_cr3;
2895         u64 pdpte;
2896         int ret;
2897
2898         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
2899                                        offset_in_page(cr3) + index * 8, 8);
2900         if (ret)
2901                 return 0;
2902         return pdpte;
2903 }
2904
2905 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2906                                    unsigned long root)
2907 {
2908         struct vcpu_svm *svm = to_svm(vcpu);
2909
2910         svm->vmcb->control.nested_cr3 = __sme_set(root);
2911         mark_dirty(svm->vmcb, VMCB_NPT);
2912 }
2913
2914 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2915                                        struct x86_exception *fault)
2916 {
2917         struct vcpu_svm *svm = to_svm(vcpu);
2918
2919         if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2920                 /*
2921                  * TODO: track the cause of the nested page fault, and
2922                  * correctly fill in the high bits of exit_info_1.
2923                  */
2924                 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2925                 svm->vmcb->control.exit_code_hi = 0;
2926                 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2927                 svm->vmcb->control.exit_info_2 = fault->address;
2928         }
2929
2930         svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2931         svm->vmcb->control.exit_info_1 |= fault->error_code;
2932
2933         /*
2934          * The present bit is always zero for page structure faults on real
2935          * hardware.
2936          */
2937         if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2938                 svm->vmcb->control.exit_info_1 &= ~1;
2939
2940         nested_svm_vmexit(svm);
2941 }
2942
2943 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
2944 {
2945         WARN_ON(mmu_is_nested(vcpu));
2946         kvm_init_shadow_mmu(vcpu);
2947         vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
2948         vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
2949         vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
2950         vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
2951         vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
2952         reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
2953         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
2954 }
2955
2956 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2957 {
2958         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
2959 }
2960
2961 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2962 {
2963         if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2964             !is_paging(&svm->vcpu)) {
2965                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2966                 return 1;
2967         }
2968
2969         if (svm->vmcb->save.cpl) {
2970                 kvm_inject_gp(&svm->vcpu, 0);
2971                 return 1;
2972         }
2973
2974         return 0;
2975 }
2976
2977 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2978                                       bool has_error_code, u32 error_code)
2979 {
2980         int vmexit;
2981
2982         if (!is_guest_mode(&svm->vcpu))
2983                 return 0;
2984
2985         vmexit = nested_svm_intercept(svm);
2986         if (vmexit != NESTED_EXIT_DONE)
2987                 return 0;
2988
2989         svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2990         svm->vmcb->control.exit_code_hi = 0;
2991         svm->vmcb->control.exit_info_1 = error_code;
2992
2993         /*
2994          * EXITINFO2 is undefined for all exception intercepts other
2995          * than #PF.
2996          */
2997         if (svm->vcpu.arch.exception.nested_apf)
2998                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2999         else if (svm->vcpu.arch.exception.has_payload)
3000                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
3001         else
3002                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
3003
3004         svm->nested.exit_required = true;
3005         return vmexit;
3006 }
3007
3008 /* This function returns true if it is safe to enable the irq window */
3009 static inline bool nested_svm_intr(struct vcpu_svm *svm)
3010 {
3011         if (!is_guest_mode(&svm->vcpu))
3012                 return true;
3013
3014         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3015                 return true;
3016
3017         if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
3018                 return false;
3019
3020         /*
3021          * If a vmexit was already requested (by an intercepted exception,
3022          * for instance), do not overwrite it with an "external interrupt"
3023          * vmexit.
3024          */
3025         if (svm->nested.exit_required)
3026                 return false;
3027
3028         svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
3029         svm->vmcb->control.exit_info_1 = 0;
3030         svm->vmcb->control.exit_info_2 = 0;
3031
3032         if (svm->nested.intercept & 1ULL) {
3033                 /*
3034                  * The #vmexit can't be emulated here directly because this
3035                  * code path runs with irqs and preemption disabled. A
3036                  * #vmexit emulation might sleep. Only signal the request for
3037                  * the #vmexit here.
3038                  */
3039                 svm->nested.exit_required = true;
3040                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
3041                 return false;
3042         }
3043
3044         return true;
3045 }
3046
3047 /* This function returns true if it is safe to enable the nmi window */
3048 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
3049 {
3050         if (!is_guest_mode(&svm->vcpu))
3051                 return true;
3052
3053         if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
3054                 return true;
3055
3056         svm->vmcb->control.exit_code = SVM_EXIT_NMI;
3057         svm->nested.exit_required = true;
3058
3059         return false;
3060 }
3061
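     /*
      * Map the guest page containing @gpa, injecting #GP and returning
      * NULL on failure. May sleep, so this must not be called from atomic
      * context.
      */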
3062 static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
3063 {
3064         struct page *page;
3065
3066         might_sleep();
3067
3068         page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
3069         if (is_error_page(page))
3070                 goto error;
3071
3072         *_page = page;
3073
3074         return kmap(page);
3075
3076 error:
3077         kvm_inject_gp(&svm->vcpu, 0);
3078
3079         return NULL;
3080 }
3081
3082 static void nested_svm_unmap(struct page *page)
3083 {
3084         kunmap(page);
3085         kvm_release_page_dirty(page);
3086 }
3087
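     /*
      * Consult the nested hypervisor's I/O permission bitmap to decide
      * whether this IOIO intercept must be reflected to it. For example,
      * a 2-byte access to port 0x3f9 checks byte 0x7f of the IOPM with
      * start_bit = 1 and mask = 0b0110.
      */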
3088 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
3089 {
3090         unsigned port, size, iopm_len;
3091         u16 val, mask;
3092         u8 start_bit;
3093         u64 gpa;
3094
3095         if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
3096                 return NESTED_EXIT_HOST;
3097
3098         port = svm->vmcb->control.exit_info_1 >> 16;
3099         size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
3100                 SVM_IOIO_SIZE_SHIFT;
3101         gpa  = svm->nested.vmcb_iopm + (port / 8);
3102         start_bit = port % 8;
3103         iopm_len = (start_bit + size > 8) ? 2 : 1;
3104         mask = (0xf >> (4 - size)) << start_bit;
3105         val = 0;
3106
3107         if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
3108                 return NESTED_EXIT_DONE;
3109
3110         return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3111 }
3112
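     /*
      * Consult the nested hypervisor's MSR permission bitmap (two bits
      * per MSR: read then write) to decide whether this MSR intercept
      * must be reflected to it.
      */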
3113 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
3114 {
3115         u32 offset, msr, value;
3116         int write, mask;
3117
3118         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3119                 return NESTED_EXIT_HOST;
3120
3121         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3122         offset = svm_msrpm_offset(msr);
3123         write  = svm->vmcb->control.exit_info_1 & 1;
3124         mask   = 1 << ((2 * (msr & 0xf)) + write);
3125
3126         if (offset == MSR_INVALID)
3127                 return NESTED_EXIT_DONE;
3128
3129         /* Offset is in 32-bit units but we need it in byte units */
3130         offset *= 4;
3131
3132         if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
3133                 return NESTED_EXIT_DONE;
3134
3135         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3136 }
3137
3138 /* DB exceptions for our internal use must not cause vmexit */
3139 static int nested_svm_intercept_db(struct vcpu_svm *svm)
3140 {
3141         unsigned long dr6;
3142
3143         /* if we're not singlestepping, it's not ours */
3144         if (!svm->nmi_singlestep)
3145                 return NESTED_EXIT_DONE;
3146
3147         /* if it's not a singlestep exception, it's not ours */
3148         if (kvm_get_dr(&svm->vcpu, 6, &dr6))
3149                 return NESTED_EXIT_DONE;
3150         if (!(dr6 & DR6_BS))
3151                 return NESTED_EXIT_DONE;
3152
3153         /* if the guest is singlestepping, it should get the vmexit */
3154         if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
3155                 disable_nmi_singlestep(svm);
3156                 return NESTED_EXIT_DONE;
3157         }
3158
3159         /* it's ours, the nested hypervisor must not see this one */
3160         return NESTED_EXIT_HOST;
3161 }
3162
3163 static int nested_svm_exit_special(struct vcpu_svm *svm)
3164 {
3165         u32 exit_code = svm->vmcb->control.exit_code;
3166
3167         switch (exit_code) {
3168         case SVM_EXIT_INTR:
3169         case SVM_EXIT_NMI:
3170         case SVM_EXIT_EXCP_BASE + MC_VECTOR:
3171                 return NESTED_EXIT_HOST;
3172         case SVM_EXIT_NPF:
3173                 /* For now we always handle NPFs when NPT is in use */
3174                 if (npt_enabled)
3175                         return NESTED_EXIT_HOST;
3176                 break;
3177         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
3178                 /* When we're shadowing, trap PFs, but not async PF */
3179                 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
3180                         return NESTED_EXIT_HOST;
3181                 break;
3182         default:
3183                 break;
3184         }
3185
3186         return NESTED_EXIT_CONTINUE;
3187 }
3188
3189 /*
3190  * Returns NESTED_EXIT_DONE if this #vmexit must be reflected to the nested hypervisor
3191  */
3192 static int nested_svm_intercept(struct vcpu_svm *svm)
3193 {
3194         u32 exit_code = svm->vmcb->control.exit_code;
3195         int vmexit = NESTED_EXIT_HOST;
3196
3197         switch (exit_code) {
3198         case SVM_EXIT_MSR:
3199                 vmexit = nested_svm_exit_handled_msr(svm);
3200                 break;
3201         case SVM_EXIT_IOIO:
3202                 vmexit = nested_svm_intercept_ioio(svm);
3203                 break;
3204         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
3205                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
3206                 if (svm->nested.intercept_cr & bit)
3207                         vmexit = NESTED_EXIT_DONE;
3208                 break;
3209         }
3210         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
3211                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
3212                 if (svm->nested.intercept_dr & bit)
3213                         vmexit = NESTED_EXIT_DONE;
3214                 break;
3215         }
3216         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
3217                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
3218                 if (svm->nested.intercept_exceptions & excp_bits) {
3219                         if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
3220                                 vmexit = nested_svm_intercept_db(svm);
3221                         else
3222                                 vmexit = NESTED_EXIT_DONE;
3223                 }
3224         /* an async page fault always causes a vmexit */
3225                 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
3226                          svm->vcpu.arch.exception.nested_apf != 0)
3227                         vmexit = NESTED_EXIT_DONE;
3228                 break;
3229         }
3230         case SVM_EXIT_ERR: {
3231                 vmexit = NESTED_EXIT_DONE;
3232                 break;
3233         }
3234         default: {
3235                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
3236                 if (svm->nested.intercept & exit_bits)
3237                         vmexit = NESTED_EXIT_DONE;
3238         }
3239         }
3240
3241         return vmexit;
3242 }
3243
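/*
 * Check the nested intercepts for the current exit and, if it is owned
 * by the nested hypervisor, emulate the #VMEXIT into it immediately.
 */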
3244 static int nested_svm_exit_handled(struct vcpu_svm *svm)
3245 {
3246         int vmexit;
3247
3248         vmexit = nested_svm_intercept(svm);
3249
3250         if (vmexit == NESTED_EXIT_DONE)
3251                 nested_svm_vmexit(svm);
3252
3253         return vmexit;
3254 }
3255
3256 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3257 {
3258         struct vmcb_control_area *dst  = &dst_vmcb->control;
3259         struct vmcb_control_area *from = &from_vmcb->control;
3260
3261         dst->intercept_cr         = from->intercept_cr;
3262         dst->intercept_dr         = from->intercept_dr;
3263         dst->intercept_exceptions = from->intercept_exceptions;
3264         dst->intercept            = from->intercept;
3265         dst->iopm_base_pa         = from->iopm_base_pa;
3266         dst->msrpm_base_pa        = from->msrpm_base_pa;
3267         dst->tsc_offset           = from->tsc_offset;
3268         dst->asid                 = from->asid;
3269         dst->tlb_ctl              = from->tlb_ctl;
3270         dst->int_ctl              = from->int_ctl;
3271         dst->int_vector           = from->int_vector;
3272         dst->int_state            = from->int_state;
3273         dst->exit_code            = from->exit_code;
3274         dst->exit_code_hi         = from->exit_code_hi;
3275         dst->exit_info_1          = from->exit_info_1;
3276         dst->exit_info_2          = from->exit_info_2;
3277         dst->exit_int_info        = from->exit_int_info;
3278         dst->exit_int_info_err    = from->exit_int_info_err;
3279         dst->nested_ctl           = from->nested_ctl;
3280         dst->event_inj            = from->event_inj;
3281         dst->event_inj_err        = from->event_inj_err;
3282         dst->nested_cr3           = from->nested_cr3;
3283         dst->virt_ext             = from->virt_ext;
3284         dst->pause_filter_count   = from->pause_filter_count;
3285         dst->pause_filter_thresh  = from->pause_filter_thresh;
3286 }
3287
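/*
 * Emulate a #VMEXIT to the nested hypervisor: copy the current guest
 * state and exit information into the nested VMCB, restore the host
 * state stashed in hsave at VMRUN time, and leave guest mode.
 */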
3288 static int nested_svm_vmexit(struct vcpu_svm *svm)
3289 {
3290         struct vmcb *nested_vmcb;
3291         struct vmcb *hsave = svm->nested.hsave;
3292         struct vmcb *vmcb = svm->vmcb;
3293         struct page *page;
3294
3295         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3296                                        vmcb->control.exit_info_1,
3297                                        vmcb->control.exit_info_2,
3298                                        vmcb->control.exit_int_info,
3299                                        vmcb->control.exit_int_info_err,
3300                                        KVM_ISA_SVM);
3301
3302         nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
3303         if (!nested_vmcb)
3304                 return 1;
3305
3306         /* Exit Guest-Mode */
3307         leave_guest_mode(&svm->vcpu);
3308         svm->nested.vmcb = 0;
3309
3310         /* Give the current vmcb to the guest */
3311         disable_gif(svm);
3312
3313         nested_vmcb->save.es     = vmcb->save.es;
3314         nested_vmcb->save.cs     = vmcb->save.cs;
3315         nested_vmcb->save.ss     = vmcb->save.ss;
3316         nested_vmcb->save.ds     = vmcb->save.ds;
3317         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
3318         nested_vmcb->save.idtr   = vmcb->save.idtr;
3319         nested_vmcb->save.efer   = svm->vcpu.arch.efer;
3320         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
3321         nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
3322         nested_vmcb->save.cr2    = vmcb->save.cr2;
3323         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
3324         nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
3325         nested_vmcb->save.rip    = vmcb->save.rip;
3326         nested_vmcb->save.rsp    = vmcb->save.rsp;
3327         nested_vmcb->save.rax    = vmcb->save.rax;
3328         nested_vmcb->save.dr7    = vmcb->save.dr7;
3329         nested_vmcb->save.dr6    = vmcb->save.dr6;
3330         nested_vmcb->save.cpl    = vmcb->save.cpl;
3331
3332         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
3333         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
3334         nested_vmcb->control.int_state         = vmcb->control.int_state;
3335         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
3336         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
3337         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
3338         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
3339         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
3340         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
3341
3342         if (svm->nrips_enabled)
3343                 nested_vmcb->control.next_rip  = vmcb->control.next_rip;
3344
3345         /*
3346          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3347          * to make sure that we do not lose injected events. So check event_inj
3348          * here and copy it to exit_int_info if it is valid.
3349          * Exit_int_info and event_inj can't both be valid because the case
3350          * below only happens on a VMRUN instruction intercept which has
3351          * no valid exit_int_info set.
3352          */
3353         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3354                 struct vmcb_control_area *nc = &nested_vmcb->control;
3355
3356                 nc->exit_int_info     = vmcb->control.event_inj;
3357                 nc->exit_int_info_err = vmcb->control.event_inj_err;
3358         }
3359
3360         nested_vmcb->control.tlb_ctl           = 0;
3361         nested_vmcb->control.event_inj         = 0;
3362         nested_vmcb->control.event_inj_err     = 0;
3363
3364         nested_vmcb->control.pause_filter_count =
3365                 svm->vmcb->control.pause_filter_count;
3366         nested_vmcb->control.pause_filter_thresh =
3367                 svm->vmcb->control.pause_filter_thresh;
3368
3369         /* We always set V_INTR_MASKING and remember the old value in hflags */
3370         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3371                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3372
3373         /* Restore the original control entries */
3374         copy_vmcb_control_area(vmcb, hsave);
3375
3376         svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
3377         kvm_clear_exception_queue(&svm->vcpu);
3378         kvm_clear_interrupt_queue(&svm->vcpu);
3379
3380         svm->nested.nested_cr3 = 0;
3381
3382         /* Restore selected save entries */
3383         svm->vmcb->save.es = hsave->save.es;
3384         svm->vmcb->save.cs = hsave->save.cs;
3385         svm->vmcb->save.ss = hsave->save.ss;
3386         svm->vmcb->save.ds = hsave->save.ds;
3387         svm->vmcb->save.gdtr = hsave->save.gdtr;
3388         svm->vmcb->save.idtr = hsave->save.idtr;
3389         kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
3390         svm_set_efer(&svm->vcpu, hsave->save.efer);
3391         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3392         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3393         if (npt_enabled) {
3394                 svm->vmcb->save.cr3 = hsave->save.cr3;
3395                 svm->vcpu.arch.cr3 = hsave->save.cr3;
3396         } else {
3397                 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
3398         }
3399         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3400         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3401         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3402         svm->vmcb->save.dr7 = 0;
3403         svm->vmcb->save.cpl = 0;
3404         svm->vmcb->control.exit_int_info = 0;
3405
3406         mark_all_dirty(svm->vmcb);
3407
3408         nested_svm_unmap(page);
3409
3410         nested_svm_uninit_mmu_context(&svm->vcpu);
3411         kvm_mmu_reset_context(&svm->vcpu);
3412         kvm_mmu_load(&svm->vcpu);
3413
3414         return 0;
3415 }
3416
3417 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
3418 {
3419         /*
3420          * This function merges the msr permission bitmaps of kvm and the
3421          * nested vmcb. It is optimized in that it only merges the parts where
3422          * the kvm msr permission bitmap may contain zero bits
3423          */
3424         int i;
3425
3426         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3427                 return true;
3428
3429         for (i = 0; i < MSRPM_OFFSETS; i++) {
3430                 u32 value, p;
3431                 u64 offset;
3432
3433                 if (msrpm_offsets[i] == 0xffffffff)
3434                         break;
3435
3436                 p      = msrpm_offsets[i];
3437                 offset = svm->nested.vmcb_msrpm + (p * 4);
3438
3439                 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
3440                         return false;
3441
3442                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3443         }
3444
3445         svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
3446
3447         return true;
3448 }
3449
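/*
 * Consistency checks on the VMCB provided by the nested hypervisor:
 * VMRUN must be intercepted, the ASID must not be zero, and nested
 * paging may only be requested when the host itself uses NPT.
 */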
3450 static bool nested_vmcb_checks(struct vmcb *vmcb)
3451 {
3452         if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3453                 return false;
3454
3455         if (vmcb->control.asid == 0)
3456                 return false;
3457
3458         if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3459             !npt_enabled)
3460                 return false;
3461
3462         return true;
3463 }
3464
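/*
 * Load the nested guest state from the VMCB provided by the nested
 * hypervisor, cache its intercepts, and switch the vcpu into guest
 * mode with the GIF set and the merged intercepts in effect.
 */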
3465 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3466                                  struct vmcb *nested_vmcb, struct page *page)
3467 {
3468         if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
3469                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3470         else
3471                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3472
3473         if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
3474                 kvm_mmu_unload(&svm->vcpu);
3475                 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3476                 nested_svm_init_mmu_context(&svm->vcpu);
3477         }
3478
3479         /* Load the nested guest state */
3480         svm->vmcb->save.es = nested_vmcb->save.es;
3481         svm->vmcb->save.cs = nested_vmcb->save.cs;
3482         svm->vmcb->save.ss = nested_vmcb->save.ss;
3483         svm->vmcb->save.ds = nested_vmcb->save.ds;
3484         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3485         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
3486         kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
3487         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3488         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3489         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3490         if (npt_enabled) {
3491                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3492                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
3493         } else
3494                 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
3495
3496         /* Guest paging mode is active - reset mmu */
3497         kvm_mmu_reset_context(&svm->vcpu);
3498
3499         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3500         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3501         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3502         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
3503
3504         /* In case we don't even reach vcpu_run, where these fields would otherwise be updated */
3505         svm->vmcb->save.rax = nested_vmcb->save.rax;
3506         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3507         svm->vmcb->save.rip = nested_vmcb->save.rip;
3508         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3509         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3510         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3511
3512         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
3513         svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
3514
3515         /* cache intercepts */
3516         svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
3517         svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
3518         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3519         svm->nested.intercept            = nested_vmcb->control.intercept;
3520
3521         svm_flush_tlb(&svm->vcpu, true);
3522         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3523         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3524                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3525         else
3526                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3527
3528         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3529                 /* We only want the cr8 intercept bits of the guest */
3530                 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3531                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3532         }
3533
3534         /* We don't want to see VMMCALLs from a nested guest */
3535         clr_intercept(svm, INTERCEPT_VMMCALL);
3536
3537         svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
3538         svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
3539
3540         svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3541         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3542         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3543         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3544         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3545
3546         svm->vmcb->control.pause_filter_count =
3547                 nested_vmcb->control.pause_filter_count;
3548         svm->vmcb->control.pause_filter_thresh =
3549                 nested_vmcb->control.pause_filter_thresh;
3550
3551         nested_svm_unmap(page);
3552
3553         /* Enter Guest-Mode */
3554         enter_guest_mode(&svm->vcpu);
3555
3556         /*
3557          * Merge guest and host intercepts - must be called with vcpu in
3558          * guest-mode to take effect here
3559          */
3560         recalc_intercepts(svm);
3561
3562         svm->nested.vmcb = vmcb_gpa;
3563
3564         enable_gif(svm);
3565
3566         mark_all_dirty(svm->vmcb);
3567 }
3568
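/*
 * Emulate VMRUN: map the VMCB addressed by RAX, validate it, stash the
 * current state in hsave and enter the nested guest.  Returns false if
 * the VMCB cannot be mapped or fails the consistency checks.
 */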
3569 static bool nested_svm_vmrun(struct vcpu_svm *svm)
3570 {
3571         struct vmcb *nested_vmcb;
3572         struct vmcb *hsave = svm->nested.hsave;
3573         struct vmcb *vmcb = svm->vmcb;
3574         struct page *page;
3575         u64 vmcb_gpa;
3576
3577         vmcb_gpa = svm->vmcb->save.rax;
3578
3579         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3580         if (!nested_vmcb)
3581                 return false;
3582
3583         if (!nested_vmcb_checks(nested_vmcb)) {
3584                 nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
3585                 nested_vmcb->control.exit_code_hi = 0;
3586                 nested_vmcb->control.exit_info_1  = 0;
3587                 nested_vmcb->control.exit_info_2  = 0;
3588
3589                 nested_svm_unmap(page);
3590
3591                 return false;
3592         }
3593
3594         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3595                                nested_vmcb->save.rip,
3596                                nested_vmcb->control.int_ctl,
3597                                nested_vmcb->control.event_inj,
3598                                nested_vmcb->control.nested_ctl);
3599
3600         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3601                                     nested_vmcb->control.intercept_cr >> 16,
3602                                     nested_vmcb->control.intercept_exceptions,
3603                                     nested_vmcb->control.intercept);
3604
3605         /* Clear internal status */
3606         kvm_clear_exception_queue(&svm->vcpu);
3607         kvm_clear_interrupt_queue(&svm->vcpu);
3608
3609         /*
3610          * Save the old vmcb, so we don't have to pick what to save, and can
3611          * restore everything when a #VMEXIT occurs
3612          */
3613         hsave->save.es     = vmcb->save.es;
3614         hsave->save.cs     = vmcb->save.cs;
3615         hsave->save.ss     = vmcb->save.ss;
3616         hsave->save.ds     = vmcb->save.ds;
3617         hsave->save.gdtr   = vmcb->save.gdtr;
3618         hsave->save.idtr   = vmcb->save.idtr;
3619         hsave->save.efer   = svm->vcpu.arch.efer;
3620         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
3621         hsave->save.cr4    = svm->vcpu.arch.cr4;
3622         hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3623         hsave->save.rip    = kvm_rip_read(&svm->vcpu);
3624         hsave->save.rsp    = vmcb->save.rsp;
3625         hsave->save.rax    = vmcb->save.rax;
3626         if (npt_enabled)
3627                 hsave->save.cr3    = vmcb->save.cr3;
3628         else
3629                 hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
3630
3631         copy_vmcb_control_area(hsave, vmcb);
3632
3633         enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
3634
3635         return true;
3636 }
3637
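/*
 * Copy the state touched by VMLOAD/VMSAVE: FS, GS, TR and LDTR, the
 * kernel GS base, and the SYSCALL/SYSENTER MSRs.
 */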
3638 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
3639 {
3640         to_vmcb->save.fs = from_vmcb->save.fs;
3641         to_vmcb->save.gs = from_vmcb->save.gs;
3642         to_vmcb->save.tr = from_vmcb->save.tr;
3643         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3644         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3645         to_vmcb->save.star = from_vmcb->save.star;
3646         to_vmcb->save.lstar = from_vmcb->save.lstar;
3647         to_vmcb->save.cstar = from_vmcb->save.cstar;
3648         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3649         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3650         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3651         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
3652 }
3653
3654 static int vmload_interception(struct vcpu_svm *svm)
3655 {
3656         struct vmcb *nested_vmcb;
3657         struct page *page;
3658         int ret;
3659
3660         if (nested_svm_check_permissions(svm))
3661                 return 1;
3662
3663         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3664         if (!nested_vmcb)
3665                 return 1;
3666
3667         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3668         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3669
3670         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
3671         nested_svm_unmap(page);
3672
3673         return ret;
3674 }
3675
3676 static int vmsave_interception(struct vcpu_svm *svm)
3677 {
3678         struct vmcb *nested_vmcb;
3679         struct page *page;
3680         int ret;
3681
3682         if (nested_svm_check_permissions(svm))
3683                 return 1;
3684
3685         nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3686         if (!nested_vmcb)
3687                 return 1;
3688
3689         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3690         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3691
3692         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
3693         nested_svm_unmap(page);
3694
3695         return ret;
3696 }
3697
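/*
 * VMRUN intercept: advance rip past the 3-byte VMRUN instruction before
 * entering the nested guest, then merge the MSR permission bitmaps.  A
 * failed merge is reported to the nested hypervisor as SVM_EXIT_ERR.
 */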
3698 static int vmrun_interception(struct vcpu_svm *svm)
3699 {
3700         if (nested_svm_check_permissions(svm))
3701                 return 1;
3702
3703         /* Save rip after vmrun instruction */
3704         kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
3705
3706         if (!nested_svm_vmrun(svm))
3707                 return 1;
3708
3709         if (!nested_svm_vmrun_msrpm(svm))
3710                 goto failed;
3711
3712         return 1;
3713
3714 failed:
3715
3716         svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
3717         svm->vmcb->control.exit_code_hi = 0;
3718         svm->vmcb->control.exit_info_1  = 0;
3719         svm->vmcb->control.exit_info_2  = 0;
3720
3721         nested_svm_vmexit(svm);
3722
3723         return 1;
3724 }
3725
3726 static int stgi_interception(struct vcpu_svm *svm)
3727 {
3728         int ret;
3729
3730         if (nested_svm_check_permissions(svm))
3731                 return 1;
3732
3733         /*
3734          * If VGIF is enabled, the STGI intercept is only added to
3735          * detect the opening of the SMI/NMI window; remove it now.
3736          */
3737         if (vgif_enabled(svm))
3738                 clr_intercept(svm, INTERCEPT_STGI);
3739
3740         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3741         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3742         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3743
3744         enable_gif(svm);
3745
3746         return ret;
3747 }
3748
3749 static int clgi_interception(struct vcpu_svm *svm)
3750 {
3751         int ret;
3752
3753         if (nested_svm_check_permissions(svm))
3754                 return 1;
3755
3756         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3757         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3758
3759         disable_gif(svm);
3760
3761         /* After a CLGI no interrupts should come */
3762         if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3763                 svm_clear_vintr(svm);
3764                 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3765                 mark_dirty(svm->vmcb, VMCB_INTR);
3766         }
3767
3768         return ret;
3769 }
3770
3771 static int invlpga_interception(struct vcpu_svm *svm)
3772 {
3773         struct kvm_vcpu *vcpu = &svm->vcpu;
3774
3775         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3776                           kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3777
3778         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3779         kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3780
3781         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3782         return kvm_skip_emulated_instruction(&svm->vcpu);
3783 }
3784
3785 static int skinit_interception(struct vcpu_svm *svm)
3786 {
3787         trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
3788
3789         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3790         return 1;
3791 }
3792
3793 static int wbinvd_interception(struct vcpu_svm *svm)
3794 {
3795         return kvm_emulate_wbinvd(&svm->vcpu);
3796 }
3797
3798 static int xsetbv_interception(struct vcpu_svm *svm)
3799 {
3800         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3801         u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3802
3803         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3804                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3805                 return kvm_skip_emulated_instruction(&svm->vcpu);
3806         }
3807
3808         return 1;
3809 }
3810
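/*
 * Decode the task switch reason and any pending event from
 * exit_int_info and exit_info_2, then defer to the common x86
 * task-switch emulation.
 */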
3811 static int task_switch_interception(struct vcpu_svm *svm)
3812 {
3813         u16 tss_selector;
3814         int reason;
3815         int int_type = svm->vmcb->control.exit_int_info &
3816                 SVM_EXITINTINFO_TYPE_MASK;
3817         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
3818         uint32_t type =
3819                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3820         uint32_t idt_v =
3821                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
3822         bool has_error_code = false;
3823         u32 error_code = 0;
3824
3825         tss_selector = (u16)svm->vmcb->control.exit_info_1;
3826
3827         if (svm->vmcb->control.exit_info_2 &
3828             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
3829                 reason = TASK_SWITCH_IRET;
3830         else if (svm->vmcb->control.exit_info_2 &
3831                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3832                 reason = TASK_SWITCH_JMP;
3833         else if (idt_v)
3834                 reason = TASK_SWITCH_GATE;
3835         else
3836                 reason = TASK_SWITCH_CALL;
3837
3838         if (reason == TASK_SWITCH_GATE) {
3839                 switch (type) {
3840                 case SVM_EXITINTINFO_TYPE_NMI:
3841                         svm->vcpu.arch.nmi_injected = false;
3842                         break;
3843                 case SVM_EXITINTINFO_TYPE_EXEPT:
3844                         if (svm->vmcb->control.exit_info_2 &
3845                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3846                                 has_error_code = true;
3847                                 error_code =
3848                                         (u32)svm->vmcb->control.exit_info_2;
3849                         }
3850                         kvm_clear_exception_queue(&svm->vcpu);
3851                         break;
3852                 case SVM_EXITINTINFO_TYPE_INTR:
3853                         kvm_clear_interrupt_queue(&svm->vcpu);
3854                         break;
3855                 default:
3856                         break;
3857                 }
3858         }
3859
3860         if (reason != TASK_SWITCH_GATE ||
3861             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3862             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
3863              (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3864                 skip_emulated_instruction(&svm->vcpu);
3865
3866         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3867                 int_vec = -1;
3868
3869         if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
3870                                 has_error_code, error_code) == EMULATE_FAIL) {
3871                 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3872                 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3873                 svm->vcpu.run->internal.ndata = 0;
3874                 return 0;
3875         }
3876         return 1;
3877 }
3878
3879 static int cpuid_interception(struct vcpu_svm *svm)
3880 {
3881         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3882         return kvm_emulate_cpuid(&svm->vcpu);
3883 }
3884
3885 static int iret_interception(struct vcpu_svm *svm)
3886 {
3887         ++svm->vcpu.stat.nmi_window_exits;
3888         clr_intercept(svm, INTERCEPT_IRET);
3889         svm->vcpu.arch.hflags |= HF_IRET_MASK;
3890         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
3891         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3892         return 1;
3893 }
3894
3895 static int invlpg_interception(struct vcpu_svm *svm)
3896 {
3897         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3898                 return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3899
3900         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3901         return kvm_skip_emulated_instruction(&svm->vcpu);
3902 }
3903
3904 static int emulate_on_interception(struct vcpu_svm *svm)
3905 {
3906         return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3907 }
3908
3909 static int rsm_interception(struct vcpu_svm *svm)
3910 {
3911         return kvm_emulate_instruction_from_buffer(&svm->vcpu,
3912                                         rsm_ins_bytes, 2) == EMULATE_DONE;
3913 }
3914
3915 static int rdpmc_interception(struct vcpu_svm *svm)
3916 {
3917         int err;
3918
3919         if (!static_cpu_has(X86_FEATURE_NRIPS))
3920                 return emulate_on_interception(svm);
3921
3922         err = kvm_rdpmc(&svm->vcpu);
3923         return kvm_complete_insn_gp(&svm->vcpu, err);
3924 }
3925
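/*
 * With the selective CR0 write intercept, a nested #VMEXIT is only due
 * when bits outside SVM_CR0_SELECTIVE_MASK (i.e. other than TS and MP)
 * actually change.
 */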
3926 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3927                                             unsigned long val)
3928 {
3929         unsigned long cr0 = svm->vcpu.arch.cr0;
3930         bool ret = false;
3931         u64 intercept;
3932
3933         intercept = svm->nested.intercept;
3934
3935         if (!is_guest_mode(&svm->vcpu) ||
3936             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3937                 return false;
3938
3939         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3940         val &= ~SVM_CR0_SELECTIVE_MASK;
3941
3942         if (cr0 ^ val) {
3943                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3944                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3945         }
3946
3947         return ret;
3948 }
3949
3950 #define CR_VALID (1ULL << 63)
3951
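/*
 * Handle CR accesses when decode assists are available: exit_info_1
 * carries the GPR operand and the exit code identifies the CR and the
 * access direction.
 */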
3952 static int cr_interception(struct vcpu_svm *svm)
3953 {
3954         int reg, cr;
3955         unsigned long val;
3956         int err;
3957
3958         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3959                 return emulate_on_interception(svm);
3960
3961         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3962                 return emulate_on_interception(svm);
3963
3964         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3965         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3966                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3967         else
3968                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
3969
3970         err = 0;
3971         if (cr >= 16) { /* mov to cr */
3972                 cr -= 16;
3973                 val = kvm_register_read(&svm->vcpu, reg);
3974                 switch (cr) {
3975                 case 0:
3976                         if (!check_selective_cr0_intercepted(svm, val))
3977                                 err = kvm_set_cr0(&svm->vcpu, val);
3978                         else
3979                                 return 1;
3980
3981                         break;
3982                 case 3:
3983                         err = kvm_set_cr3(&svm->vcpu, val);
3984                         break;
3985                 case 4:
3986                         err = kvm_set_cr4(&svm->vcpu, val);
3987                         break;
3988                 case 8:
3989                         err = kvm_set_cr8(&svm->vcpu, val);
3990                         break;
3991                 default:
3992                         WARN(1, "unhandled write to CR%d", cr);
3993                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3994                         return 1;
3995                 }
3996         } else { /* mov from cr */
3997                 switch (cr) {
3998                 case 0:
3999                         val = kvm_read_cr0(&svm->vcpu);
4000                         break;
4001                 case 2:
4002                         val = svm->vcpu.arch.cr2;
4003                         break;
4004                 case 3:
4005                         val = kvm_read_cr3(&svm->vcpu);
4006                         break;
4007                 case 4:
4008                         val = kvm_read_cr4(&svm->vcpu);
4009                         break;
4010                 case 8:
4011                         val = kvm_get_cr8(&svm->vcpu);
4012                         break;
4013                 default:
4014                         WARN(1, "unhandled read from CR%d", cr);
4015                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4016                         return 1;
4017                 }
4018                 kvm_register_write(&svm->vcpu, reg, val);
4019         }
4020         return kvm_complete_insn_gp(&svm->vcpu, err);
4021 }
4022
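/*
 * Handle DR accesses.  If userspace is not debugging the guest, stop
 * intercepting DR accesses entirely and let the vcpu run with the
 * guest's debug registers loaded until the next vmexit.
 */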
4023 static int dr_interception(struct vcpu_svm *svm)
4024 {
4025         int reg, dr;
4026         unsigned long val;
4027
4028         if (svm->vcpu.guest_debug == 0) {
4029                 /*
4030                  * No more DR vmexits; force a reload of the debug registers
4031                  * and reenter on this instruction.  The next vmexit will
4032                  * retrieve the full state of the debug registers.
4033                  */
4034                 clr_dr_intercepts(svm);
4035                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
4036                 return 1;
4037         }
4038
4039         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
4040                 return emulate_on_interception(svm);
4041
4042         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
4043         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
4044
4045         if (dr >= 16) { /* mov to DRn */
4046                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
4047                         return 1;
4048                 val = kvm_register_read(&svm->vcpu, reg);
4049                 kvm_set_dr(&svm->vcpu, dr - 16, val);
4050         } else {
4051                 if (!kvm_require_dr(&svm->vcpu, dr))
4052                         return 1;
4053                 kvm_get_dr(&svm->vcpu, dr, &val);
4054                 kvm_register_write(&svm->vcpu, reg, val);
4055         }
4056
4057         return kvm_skip_emulated_instruction(&svm->vcpu);
4058 }
4059
4060 static int cr8_write_interception(struct vcpu_svm *svm)
4061 {
4062         struct kvm_run *kvm_run = svm->vcpu.run;
4063         int r;
4064
4065         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
4066         /* instruction emulation calls kvm_set_cr8() */
4067         r = cr_interception(svm);
4068         if (lapic_in_kernel(&svm->vcpu))
4069                 return r;
4070         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
4071                 return r;
4072         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
4073         return 0;
4074 }
4075
4076 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
4077 {
4078         msr->data = 0;
4079
4080         switch (msr->index) {
4081         case MSR_F10H_DECFG:
4082                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
4083                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
4084                 break;
4085         default:
4086                 return 1;
4087         }
4088
4089         return 0;
4090 }
4091
4092 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4093 {
4094         struct vcpu_svm *svm = to_svm(vcpu);
4095
4096         switch (msr_info->index) {
4097         case MSR_STAR:
4098                 msr_info->data = svm->vmcb->save.star;
4099                 break;
4100 #ifdef CONFIG_X86_64
4101         case MSR_LSTAR:
4102                 msr_info->data = svm->vmcb->save.lstar;
4103                 break;
4104         case MSR_CSTAR:
4105                 msr_info->data = svm->vmcb->save.cstar;
4106                 break;
4107         case MSR_KERNEL_GS_BASE:
4108                 msr_info->data = svm->vmcb->save.kernel_gs_base;
4109                 break;
4110         case MSR_SYSCALL_MASK:
4111                 msr_info->data = svm->vmcb->save.sfmask;
4112                 break;
4113 #endif
4114         case MSR_IA32_SYSENTER_CS:
4115                 msr_info->data = svm->vmcb->save.sysenter_cs;
4116                 break;
4117         case MSR_IA32_SYSENTER_EIP:
4118                 msr_info->data = svm->sysenter_eip;
4119                 break;
4120         case MSR_IA32_SYSENTER_ESP:
4121                 msr_info->data = svm->sysenter_esp;
4122                 break;
4123         case MSR_TSC_AUX:
4124                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4125                         return 1;
4126                 msr_info->data = svm->tsc_aux;
4127                 break;
4128         /*
4129          * Nobody will change the following 5 values in the VMCB so we can
4130          * safely return them on rdmsr. They will always be 0 until LBRV is
4131          * implemented.
4132          */
4133         case MSR_IA32_DEBUGCTLMSR:
4134                 msr_info->data = svm->vmcb->save.dbgctl;
4135                 break;
4136         case MSR_IA32_LASTBRANCHFROMIP:
4137                 msr_info->data = svm->vmcb->save.br_from;
4138                 break;
4139         case MSR_IA32_LASTBRANCHTOIP:
4140                 msr_info->data = svm->vmcb->save.br_to;
4141                 break;
4142         case MSR_IA32_LASTINTFROMIP:
4143                 msr_info->data = svm->vmcb->save.last_excp_from;
4144                 break;
4145         case MSR_IA32_LASTINTTOIP:
4146                 msr_info->data = svm->vmcb->save.last_excp_to;
4147                 break;
4148         case MSR_VM_HSAVE_PA:
4149                 msr_info->data = svm->nested.hsave_msr;
4150                 break;
4151         case MSR_VM_CR:
4152                 msr_info->data = svm->nested.vm_cr_msr;
4153                 break;
4154         case MSR_IA32_SPEC_CTRL:
4155                 if (!msr_info->host_initiated &&
4156                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4157                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4158                         return 1;
4159
4160                 msr_info->data = svm->spec_ctrl;
4161                 break;
4162         case MSR_AMD64_VIRT_SPEC_CTRL:
4163                 if (!msr_info->host_initiated &&
4164                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4165                         return 1;
4166
4167                 msr_info->data = svm->virt_spec_ctrl;
4168                 break;
4169         case MSR_F15H_IC_CFG: {
4170
4171                 int family, model;
4172
4173                 family = guest_cpuid_family(vcpu);
4174                 model  = guest_cpuid_model(vcpu);
4175
4176                 if (family < 0 || model < 0)
4177                         return kvm_get_msr_common(vcpu, msr_info);
4178
4179                 msr_info->data = 0;
4180
4181                 if (family == 0x15 &&
4182                     (model >= 0x2 && model < 0x20))
4183                         msr_info->data = 0x1E;
4184                 }
4185                 break;
4186         case MSR_F10H_DECFG:
4187                 msr_info->data = svm->msr_decfg;
4188                 break;
4189         default:
4190                 return kvm_get_msr_common(vcpu, msr_info);
4191         }
4192         return 0;
4193 }
4194
4195 static int rdmsr_interception(struct vcpu_svm *svm)
4196 {
4197         u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4198         struct msr_data msr_info;
4199
4200         msr_info.index = ecx;
4201         msr_info.host_initiated = false;
4202         if (svm_get_msr(&svm->vcpu, &msr_info)) {
4203                 trace_kvm_msr_read_ex(ecx);
4204                 kvm_inject_gp(&svm->vcpu, 0);
4205                 return 1;
4206         } else {
4207                 trace_kvm_msr_read(ecx, msr_info.data);
4208
4209                 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
4210                                    msr_info.data & 0xffffffff);
4211                 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
4212                                    msr_info.data >> 32);
4213                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
4214                 return kvm_skip_emulated_instruction(&svm->vcpu);
4215         }
4216 }
4217
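/*
 * Emulate writes to MSR_VM_CR: only bits in SVM_VM_CR_VALID_MASK may be
 * set, LOCK and SVMDIS become read-only once SVMDIS is set, and
 * disabling SVM is refused while EFER.SVME is enabled.
 */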
4218 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
4219 {
4220         struct vcpu_svm *svm = to_svm(vcpu);
4221         int svm_dis, chg_mask;
4222
4223         if (data & ~SVM_VM_CR_VALID_MASK)
4224                 return 1;
4225
4226         chg_mask = SVM_VM_CR_VALID_MASK;
4227
4228         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
4229                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
4230
4231         svm->nested.vm_cr_msr &= ~chg_mask;
4232         svm->nested.vm_cr_msr |= (data & chg_mask);
4233
4234         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
4235
4236         /* check for svm_disable while efer.svme is set */
4237         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
4238                 return 1;
4239
4240         return 0;
4241 }
4242
4243 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4244 {
4245         struct vcpu_svm *svm = to_svm(vcpu);
4246
4247         u32 ecx = msr->index;
4248         u64 data = msr->data;
4249         switch (ecx) {
4250         case MSR_IA32_CR_PAT:
4251                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4252                         return 1;
4253                 vcpu->arch.pat = data;
4254                 svm->vmcb->save.g_pat = data;
4255                 mark_dirty(svm->vmcb, VMCB_NPT);
4256                 break;
4257         case MSR_IA32_SPEC_CTRL:
4258                 if (!msr->host_initiated &&
4259                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4260                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4261                         return 1;
4262
4263                 /* The STIBP bit doesn't fault even if it's not advertised */
4264                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4265                         return 1;
4266
4267                 svm->spec_ctrl = data;
4268
4269                 if (!data)
4270                         break;
4271
4272                 /*
4273                  * For non-nested:
4274                  * When it's written (to non-zero) for the first time, pass
4275                  * it through.
4276                  *
4277                  * For nested:
4278                  * The handling of the MSR bitmap for L2 guests is done in
4279                  * nested_svm_vmrun_msrpm.
4280                  * We update the L1 MSR bit as well since it will end up
4281                  * touching the MSR anyway now.
4282                  */
4283                 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
4284                 break;
4285         case MSR_IA32_PRED_CMD:
4286                 if (!msr->host_initiated &&
4287                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
4288                         return 1;
4289
4290                 if (data & ~PRED_CMD_IBPB)
4291                         return 1;
4292
4293                 if (!data)
4294                         break;
4295
4296                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4297                 if (is_guest_mode(vcpu))
4298                         break;
4299                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
4300                 break;
4301         case MSR_AMD64_VIRT_SPEC_CTRL:
4302                 if (!msr->host_initiated &&
4303                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4304                         return 1;
4305
4306                 if (data & ~SPEC_CTRL_SSBD)
4307                         return 1;
4308
4309                 svm->virt_spec_ctrl = data;
4310                 break;
4311         case MSR_STAR:
4312                 svm->vmcb->save.star = data;
4313                 break;
4314 #ifdef CONFIG_X86_64
4315         case MSR_LSTAR:
4316                 svm->vmcb->save.lstar = data;
4317                 break;
4318         case MSR_CSTAR:
4319                 svm->vmcb->save.cstar = data;
4320                 break;
4321         case MSR_KERNEL_GS_BASE:
4322                 svm->vmcb->save.kernel_gs_base = data;
4323                 break;
4324         case MSR_SYSCALL_MASK:
4325                 svm->vmcb->save.sfmask = data;
4326                 break;
4327 #endif
4328         case MSR_IA32_SYSENTER_CS:
4329                 svm->vmcb->save.sysenter_cs = data;
4330                 break;
4331         case MSR_IA32_SYSENTER_EIP:
4332                 svm->sysenter_eip = data;
4333                 svm->vmcb->save.sysenter_eip = data;
4334                 break;
4335         case MSR_IA32_SYSENTER_ESP:
4336                 svm->sysenter_esp = data;
4337                 svm->vmcb->save.sysenter_esp = data;
4338                 break;
4339         case MSR_TSC_AUX:
4340                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4341                         return 1;
4342
4343                 /*
4344                  * This is rare, so we update the MSR here instead of using
4345                  * direct_access_msrs.  Doing that would require a rdmsr in
4346                  * svm_vcpu_put.
4347                  */
4348                 svm->tsc_aux = data;
4349                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4350                 break;
4351         case MSR_IA32_DEBUGCTLMSR:
4352                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
4353                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4354                                     __func__, data);
4355                         break;
4356                 }
4357                 if (data & DEBUGCTL_RESERVED_BITS)
4358                         return 1;
4359
4360                 svm->vmcb->save.dbgctl = data;
4361                 mark_dirty(svm->vmcb, VMCB_LBR);
4362                 if (data & (1ULL<<0))
4363                         svm_enable_lbrv(svm);
4364                 else
4365                         svm_disable_lbrv(svm);
4366                 break;
4367         case MSR_VM_HSAVE_PA:
4368                 svm->nested.hsave_msr = data;
4369                 break;
4370         case MSR_VM_CR:
4371                 return svm_set_vm_cr(vcpu, data);
4372         case MSR_VM_IGNNE:
4373                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
4374                 break;
4375         case MSR_F10H_DECFG: {
4376                 struct kvm_msr_entry msr_entry;
4377
4378                 msr_entry.index = msr->index;
4379                 if (svm_get_msr_feature(&msr_entry))
4380                         return 1;
4381
4382                 /* Check the supported bits */
4383                 if (data & ~msr_entry.data)
4384                         return 1;
4385
4386                 /* Don't allow the guest to change a bit; inject #GP */
4387                 if (!msr->host_initiated && (data ^ msr_entry.data))
4388                         return 1;
4389
4390                 svm->msr_decfg = data;
4391                 break;
4392         }
4393         case MSR_IA32_APICBASE:
4394                 if (kvm_vcpu_apicv_active(vcpu))
4395                         avic_update_vapic_bar(to_svm(vcpu), data);
4396                 /* Fall through */
4397         default:
4398                 return kvm_set_msr_common(vcpu, msr);
4399         }
4400         return 0;
4401 }
4402
4403 static int wrmsr_interception(struct vcpu_svm *svm)
4404 {
4405         struct msr_data msr;
4406         u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4407         u64 data = kvm_read_edx_eax(&svm->vcpu);
4408
4409         msr.data = data;
4410         msr.index = ecx;
4411         msr.host_initiated = false;
4412
4413         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
4414         if (kvm_set_msr(&svm->vcpu, &msr)) {
4415                 trace_kvm_msr_write_ex(ecx, data);
4416                 kvm_inject_gp(&svm->vcpu, 0);
4417                 return 1;
4418         } else {
4419                 trace_kvm_msr_write(ecx, data);
4420                 return kvm_skip_emulated_instruction(&svm->vcpu);
4421         }
4422 }
4423
4424 static int msr_interception(struct vcpu_svm *svm)
4425 {
4426         if (svm->vmcb->control.exit_info_1)
4427                 return wrmsr_interception(svm);
4428         else
4429                 return rdmsr_interception(svm);
4430 }
4431
4432 static int interrupt_window_interception(struct vcpu_svm *svm)
4433 {
4434         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4435         svm_clear_vintr(svm);
4436         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
4437         mark_dirty(svm->vmcb, VMCB_INTR);
4438         ++svm->vcpu.stat.irq_window_exits;
4439         return 1;
4440 }
4441
4442 static int pause_interception(struct vcpu_svm *svm)
4443 {
4444         struct kvm_vcpu *vcpu = &svm->vcpu;
4445         bool in_kernel = (svm_get_cpl(vcpu) == 0);
4446
4447         if (pause_filter_thresh)
4448                 grow_ple_window(vcpu);
4449
4450         kvm_vcpu_on_spin(vcpu, in_kernel);
4451         return 1;
4452 }
4453
4454 static int nop_interception(struct vcpu_svm *svm)
4455 {
4456         return kvm_skip_emulated_instruction(&(svm->vcpu));
4457 }
4458
4459 static int monitor_interception(struct vcpu_svm *svm)
4460 {
4461         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4462         return nop_interception(svm);
4463 }
4464
4465 static int mwait_interception(struct vcpu_svm *svm)
4466 {
4467         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4468         return nop_interception(svm);
4469 }
4470
4471 enum avic_ipi_failure_cause {
4472         AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4473         AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4474         AVIC_IPI_FAILURE_INVALID_TARGET,
4475         AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4476 };
4477
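/*
 * An AVIC incomplete-IPI exit means the hardware could not deliver the
 * IPI itself; exit_info_1 carries the ICR value and exit_info_2 the
 * failure cause handled below.
 */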
4478 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4479 {
4480         u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4481         u32 icrl = svm->vmcb->control.exit_info_1;
4482         u32 id = svm->vmcb->control.exit_info_2 >> 32;
4483         u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
4484         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4485
4486         trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4487
4488         switch (id) {
4489         case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4490                 /*
4491                  * AVIC hardware handles the generation of
4492                  * IPIs when the specified Message Type is Fixed
4493                  * (also known as fixed delivery mode) and
4494                  * the Trigger Mode is edge-triggered. The hardware
4495                  * also supports self and broadcast delivery modes
4496                  * specified via the Destination Shorthand (DSH)
4497                  * field of the ICRL. Logical and physical APIC ID
4498                  * formats are supported. All other IPI types cause
4499                  * a #VMEXIT, which needs to be emulated.
4500                  */
4501                 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4502                 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4503                 break;
4504         case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4505                 int i;
4506                 struct kvm_vcpu *vcpu;
4507                 struct kvm *kvm = svm->vcpu.kvm;
4508                 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4509
4510                 /*
4511                  * At this point, we expect that the AVIC HW has already
4512                  * set the appropriate IRR bits on the valid target
4513                  * vcpus. So, we just need to kick the appropriate vcpu.
4514                  */
4515                 kvm_for_each_vcpu(i, vcpu, kvm) {
4516                         bool m = kvm_apic_match_dest(vcpu, apic,
4517                                                      icrl & KVM_APIC_SHORT_MASK,
4518                                                      GET_APIC_DEST_FIELD(icrh),
4519                                                      icrl & KVM_APIC_DEST_MASK);
4520
4521                         if (m && !avic_vcpu_is_running(vcpu))
4522                                 kvm_vcpu_wake_up(vcpu);
4523                 }
4524                 break;
4525         }
4526         case AVIC_IPI_FAILURE_INVALID_TARGET:
4527                 break;
4528         case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4529                 WARN_ONCE(1, "Invalid backing page\n");
4530                 break;
4531         default:
4532                 pr_err("Unknown IPI interception\n");
4533         }
4534
4535         return 1;
4536 }
4537
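/*
 * Look up the AVIC logical APIC ID table entry for the given LDR value,
 * using flat or cluster decoding as selected by the caller from the DFR.
 */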
4538 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4539 {
4540         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
4541         int index;
4542         u32 *logical_apic_id_table;
4543         int dlid = GET_APIC_LOGICAL_ID(ldr);
4544
4545         if (!dlid)
4546                 return NULL;
4547
4548         if (flat) { /* flat */
4549                 index = ffs(dlid) - 1;
4550                 if (index > 7)
4551                         return NULL;
4552         } else { /* cluster */
4553                 int cluster = (dlid & 0xf0) >> 4;
4554                 int apic = ffs(dlid & 0x0f) - 1;
4555
4556                 if ((apic < 0) || (apic > 7) ||
4557                     (cluster >= 0xf))
4558                         return NULL;
4559                 index = (cluster << 2) + apic;
4560         }
4561
4562         logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
4563
4564         return &logical_apic_id_table[index];
4565 }
4566
4567 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
4568                           bool valid)
4569 {
4570         bool flat;
4571         u32 *entry, new_entry;
4572
4573         flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4574         entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4575         if (!entry)
4576                 return -EINVAL;
4577
4578         new_entry = READ_ONCE(*entry);
4579         new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4580         new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4581         if (valid)
4582                 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4583         else
4584                 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4585         WRITE_ONCE(*entry, new_entry);
4586
4587         return 0;
4588 }
4589
4590 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4591 {
4592         int ret;
4593         struct vcpu_svm *svm = to_svm(vcpu);
4594         u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4595
4596         if (!ldr)
4597                 return 1;
4598
4599         ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
4600         if (ret && svm->ldr_reg) {
4601                 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
4602                 svm->ldr_reg = 0;
4603         } else {
4604                 svm->ldr_reg = ldr;
4605         }
4606         return ret;
4607 }
4608
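/*
 * The guest changed its APIC ID: move the physical APIC ID table entry
 * to the new slot and refresh the logical table if an LDR is set.
 */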
4609 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4610 {
4611         u64 *old, *new;
4612         struct vcpu_svm *svm = to_svm(vcpu);
4613         u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4614         u32 id = (apic_id_reg >> 24) & 0xff;
4615
4616         if (vcpu->vcpu_id == id)
4617                 return 0;
4618
4619         old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4620         new = avic_get_physical_id_entry(vcpu, id);
4621         if (!new || !old)
4622                 return 1;
4623
4624         /* We need to move physical_id_entry to new offset */
4625         *new = *old;
4626         *old = 0ULL;
4627         to_svm(vcpu)->avic_physical_id_cache = new;
4628
4629         /*
4630          * Also update the guest physical APIC ID in the logical
4631          * APIC ID table entry if the LDR has already been set up.
4632          */
4633         if (svm->ldr_reg)
4634                 avic_handle_ldr_update(vcpu);
4635
4636         return 0;
4637 }
4638
4639 static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4640 {
4641         struct vcpu_svm *svm = to_svm(vcpu);
4642         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
4643         u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4644         u32 mod = (dfr >> 28) & 0xf;
4645
4646         /*
4647          * We assume that all local APICs are using the same type.
4648          * If this changes, we need to flush the AVIC logical
4649          * APIC ID table.
4650          */
4651         if (kvm_svm->ldr_mode == mod)
4652                 return 0;
4653
4654         clear_page(page_address(kvm_svm->avic_logical_id_table_page));
4655         kvm_svm->ldr_mode = mod;
4656
4657         if (svm->ldr_reg)
4658                 avic_handle_ldr_update(vcpu);
4659         return 0;
4660 }
4661
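/*
 * Handle a trap-type unaccelerated APIC write: the register has already
 * been updated in the backing page, so refresh AVIC bookkeeping for
 * ID/LDR/DFR changes and then replay the new value into the emulated
 * local APIC to trigger its side effects.
 */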
4662 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4663 {
4664         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4665         u32 offset = svm->vmcb->control.exit_info_1 &
4666                                 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4667
4668         switch (offset) {
4669         case APIC_ID:
4670                 if (avic_handle_apic_id_update(&svm->vcpu))
4671                         return 0;
4672                 break;
4673         case APIC_LDR:
4674                 if (avic_handle_ldr_update(&svm->vcpu))
4675                         return 0;
4676                 break;
4677         case APIC_DFR:
4678                 avic_handle_dfr_update(&svm->vcpu);
4679                 break;
4680         default:
4681                 break;
4682         }
4683
4684         kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4685
4686         return 1;
4687 }
4688
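/*
 * AVIC reports accesses to the APIC registers below as trap-type exits
 * (taken after the write completes); everything else is a fault-type
 * exit and requires instruction emulation.
 */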
4689 static bool is_avic_unaccelerated_access_trap(u32 offset)
4690 {
4691         bool ret = false;
4692
4693         switch (offset) {
4694         case APIC_ID:
4695         case APIC_EOI:
4696         case APIC_RRR:
4697         case APIC_LDR:
4698         case APIC_DFR:
4699         case APIC_SPIV:
4700         case APIC_ESR:
4701         case APIC_ICR:
4702         case APIC_LVTT:
4703         case APIC_LVTTHMR:
4704         case APIC_LVTPC:
4705         case APIC_LVT0:
4706         case APIC_LVT1:
4707         case APIC_LVTERR:
4708         case APIC_TMICT:
4709         case APIC_TDCR:
4710                 ret = true;
4711                 break;
4712         default:
4713                 break;
4714         }
4715         return ret;
4716 }
4717
4718 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4719 {
4720         int ret = 0;
4721         u32 offset = svm->vmcb->control.exit_info_1 &
4722                      AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4723         u32 vector = svm->vmcb->control.exit_info_2 &
4724                      AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4725         bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4726                      AVIC_UNACCEL_ACCESS_WRITE_MASK;
4727         bool trap = is_avic_unaccelerated_access_trap(offset);
4728
4729         trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4730                                             trap, write, vector);
4731         if (trap) {
4732                 /* Handling Trap */
4733                 WARN_ONCE(!write, "svm: Handling trap read.\n");
4734                 ret = avic_unaccel_trap_write(svm);
4735         } else {
4736                 /* Handling Fault */
4737                 ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4738         }
4739
4740         return ret;
4741 }
4742
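/*
 * #VMEXIT dispatch table, indexed by exit code.  handle_exit() vectors
 * through this table once nested-exit processing is done.
 */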
4743 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4744         [SVM_EXIT_READ_CR0]                     = cr_interception,
4745         [SVM_EXIT_READ_CR3]                     = cr_interception,
4746         [SVM_EXIT_READ_CR4]                     = cr_interception,
4747         [SVM_EXIT_READ_CR8]                     = cr_interception,
4748         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
4749         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
4750         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
4751         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
4752         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
4753         [SVM_EXIT_READ_DR0]                     = dr_interception,
4754         [SVM_EXIT_READ_DR1]                     = dr_interception,
4755         [SVM_EXIT_READ_DR2]                     = dr_interception,
4756         [SVM_EXIT_READ_DR3]                     = dr_interception,
4757         [SVM_EXIT_READ_DR4]                     = dr_interception,
4758         [SVM_EXIT_READ_DR5]                     = dr_interception,
4759         [SVM_EXIT_READ_DR6]                     = dr_interception,
4760         [SVM_EXIT_READ_DR7]                     = dr_interception,
4761         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
4762         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
4763         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
4764         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
4765         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
4766         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
4767         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
4768         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
4769         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
4770         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
4771         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
4772         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
4773         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
4774         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
4775         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
4776         [SVM_EXIT_INTR]                         = intr_interception,
4777         [SVM_EXIT_NMI]                          = nmi_interception,
4778         [SVM_EXIT_SMI]                          = nop_on_interception,
4779         [SVM_EXIT_INIT]                         = nop_on_interception,
4780         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
4781         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
4782         [SVM_EXIT_CPUID]                        = cpuid_interception,
4783         [SVM_EXIT_IRET]                         = iret_interception,
4784         [SVM_EXIT_INVD]                         = emulate_on_interception,
4785         [SVM_EXIT_PAUSE]                        = pause_interception,
4786         [SVM_EXIT_HLT]                          = halt_interception,
4787         [SVM_EXIT_INVLPG]                       = invlpg_interception,
4788         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
4789         [SVM_EXIT_IOIO]                         = io_interception,
4790         [SVM_EXIT_MSR]                          = msr_interception,
4791         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
4792         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
4793         [SVM_EXIT_VMRUN]                        = vmrun_interception,
4794         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
4795         [SVM_EXIT_VMLOAD]                       = vmload_interception,
4796         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
4797         [SVM_EXIT_STGI]                         = stgi_interception,
4798         [SVM_EXIT_CLGI]                         = clgi_interception,
4799         [SVM_EXIT_SKINIT]                       = skinit_interception,
4800         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
4801         [SVM_EXIT_MONITOR]                      = monitor_interception,
4802         [SVM_EXIT_MWAIT]                        = mwait_interception,
4803         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
4804         [SVM_EXIT_NPF]                          = npf_interception,
4805         [SVM_EXIT_RSM]                          = rsm_interception,
4806         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
4807         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
4808 };
4809
4810 static void dump_vmcb(struct kvm_vcpu *vcpu)
4811 {
4812         struct vcpu_svm *svm = to_svm(vcpu);
4813         struct vmcb_control_area *control = &svm->vmcb->control;
4814         struct vmcb_save_area *save = &svm->vmcb->save;
4815
4816         pr_err("VMCB Control Area:\n");
4817         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4818         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4819         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4820         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4821         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4822         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4823         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4824         pr_err("%-20s%d\n", "pause filter threshold:",
4825                control->pause_filter_thresh);
4826         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4827         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4828         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4829         pr_err("%-20s%d\n", "asid:", control->asid);
4830         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4831         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4832         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4833         pr_err("%-20s%08x\n", "int_state:", control->int_state);
4834         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4835         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4836         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4837         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4838         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4839         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4840         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
4841         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
4842         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4843         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4844         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
4845         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
4846         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4847         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4848         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
4849         pr_err("VMCB State Save Area:\n");
4850         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4851                "es:",
4852                save->es.selector, save->es.attrib,
4853                save->es.limit, save->es.base);
4854         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4855                "cs:",
4856                save->cs.selector, save->cs.attrib,
4857                save->cs.limit, save->cs.base);
4858         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4859                "ss:",
4860                save->ss.selector, save->ss.attrib,
4861                save->ss.limit, save->ss.base);
4862         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4863                "ds:",
4864                save->ds.selector, save->ds.attrib,
4865                save->ds.limit, save->ds.base);
4866         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4867                "fs:",
4868                save->fs.selector, save->fs.attrib,
4869                save->fs.limit, save->fs.base);
4870         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4871                "gs:",
4872                save->gs.selector, save->gs.attrib,
4873                save->gs.limit, save->gs.base);
4874         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4875                "gdtr:",
4876                save->gdtr.selector, save->gdtr.attrib,
4877                save->gdtr.limit, save->gdtr.base);
4878         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4879                "ldtr:",
4880                save->ldtr.selector, save->ldtr.attrib,
4881                save->ldtr.limit, save->ldtr.base);
4882         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4883                "idtr:",
4884                save->idtr.selector, save->idtr.attrib,
4885                save->idtr.limit, save->idtr.base);
4886         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4887                "tr:",
4888                save->tr.selector, save->tr.attrib,
4889                save->tr.limit, save->tr.base);
4890         pr_err("cpl:            %d                efer:         %016llx\n",
4891                 save->cpl, save->efer);
4892         pr_err("%-15s %016llx %-13s %016llx\n",
4893                "cr0:", save->cr0, "cr2:", save->cr2);
4894         pr_err("%-15s %016llx %-13s %016llx\n",
4895                "cr3:", save->cr3, "cr4:", save->cr4);
4896         pr_err("%-15s %016llx %-13s %016llx\n",
4897                "dr6:", save->dr6, "dr7:", save->dr7);
4898         pr_err("%-15s %016llx %-13s %016llx\n",
4899                "rip:", save->rip, "rflags:", save->rflags);
4900         pr_err("%-15s %016llx %-13s %016llx\n",
4901                "rsp:", save->rsp, "rax:", save->rax);
4902         pr_err("%-15s %016llx %-13s %016llx\n",
4903                "star:", save->star, "lstar:", save->lstar);
4904         pr_err("%-15s %016llx %-13s %016llx\n",
4905                "cstar:", save->cstar, "sfmask:", save->sfmask);
4906         pr_err("%-15s %016llx %-13s %016llx\n",
4907                "kernel_gs_base:", save->kernel_gs_base,
4908                "sysenter_cs:", save->sysenter_cs);
4909         pr_err("%-15s %016llx %-13s %016llx\n",
4910                "sysenter_esp:", save->sysenter_esp,
4911                "sysenter_eip:", save->sysenter_eip);
4912         pr_err("%-15s %016llx %-13s %016llx\n",
4913                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4914         pr_err("%-15s %016llx %-13s %016llx\n",
4915                "br_from:", save->br_from, "br_to:", save->br_to);
4916         pr_err("%-15s %016llx %-13s %016llx\n",
4917                "excp_from:", save->last_excp_from,
4918                "excp_to:", save->last_excp_to);
4919 }
4920
4921 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4922 {
4923         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4924
4925         *info1 = control->exit_info_1;
4926         *info2 = control->exit_info_2;
4927 }
4928
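/*
 * Top-level #VMEXIT handler: sync shadowed state back to the vcpu, give
 * a nested hypervisor the chance to claim the exit, then dispatch via
 * svm_exit_handlers[].
 */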
4929 static int handle_exit(struct kvm_vcpu *vcpu)
4930 {
4931         struct vcpu_svm *svm = to_svm(vcpu);
4932         struct kvm_run *kvm_run = vcpu->run;
4933         u32 exit_code = svm->vmcb->control.exit_code;
4934
4935         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4936
4937         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
4938                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4939         if (npt_enabled)
4940                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
4941
4942         if (unlikely(svm->nested.exit_required)) {
4943                 nested_svm_vmexit(svm);
4944                 svm->nested.exit_required = false;
4945
4946                 return 1;
4947         }
4948
4949         if (is_guest_mode(vcpu)) {
4950                 int vmexit;
4951
4952                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4953                                         svm->vmcb->control.exit_info_1,
4954                                         svm->vmcb->control.exit_info_2,
4955                                         svm->vmcb->control.exit_int_info,
4956                                         svm->vmcb->control.exit_int_info_err,
4957                                         KVM_ISA_SVM);
4958
4959                 vmexit = nested_svm_exit_special(svm);
4960
4961                 if (vmexit == NESTED_EXIT_CONTINUE)
4962                         vmexit = nested_svm_exit_handled(svm);
4963
4964                 if (vmexit == NESTED_EXIT_DONE)
4965                         return 1;
4966         }
4967
4968         svm_complete_interrupts(svm);
4969
4970         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4971                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4972                 kvm_run->fail_entry.hardware_entry_failure_reason
4973                         = svm->vmcb->control.exit_code;
4974                 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4975                 dump_vmcb(vcpu);
4976                 return 0;
4977         }
4978
4979         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
4980             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
4981             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4982             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
4983                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
4984                        "exit_code 0x%x\n",
4985                        __func__, svm->vmcb->control.exit_int_info,
4986                        exit_code);
4987
4988         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
4989             || !svm_exit_handlers[exit_code]) {
4990                 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
4991                 kvm_queue_exception(vcpu, UD_VECTOR);
4992                 return 1;
4993         }
4994
4995         return svm_exit_handlers[exit_code](svm);
4996 }
4997
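/*
 * Reload the host TR after VMRUN.  LTR faults on a busy TSS, so mark the
 * descriptor "available" (type 9) before reloading it.
 */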
4998 static void reload_tss(struct kvm_vcpu *vcpu)
4999 {
5000         int cpu = raw_smp_processor_id();
5001
5002         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5003         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
5004         load_TR_desc();
5005 }
5006
5007 static void pre_sev_run(struct vcpu_svm *svm, int cpu)
5008 {
5009         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5010         int asid = sev_get_asid(svm->vcpu.kvm);
5011
5012         /* Assign the ASID allocated for this SEV guest */
5013         svm->vmcb->control.asid = asid;
5014
5015         /*
5016          * Flush guest TLB:
5017          *
5018          * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
5019          * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
5020          */
5021         if (sd->sev_vmcbs[asid] == svm->vmcb &&
5022             svm->last_cpu == cpu)
5023                 return;
5024
5025         svm->last_cpu = cpu;
5026         sd->sev_vmcbs[asid] = svm->vmcb;
5027         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5028         mark_dirty(svm->vmcb, VMCB_ASID);
5029 }
5030
5031 static void pre_svm_run(struct vcpu_svm *svm)
5032 {
5033         int cpu = raw_smp_processor_id();
5034
5035         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5036
5037         if (sev_guest(svm->vcpu.kvm))
5038                 return pre_sev_run(svm, cpu);
5039
5040         /* FIXME: handle wraparound of asid_generation */
5041         if (svm->asid_generation != sd->asid_generation)
5042                 new_asid(svm, sd);
5043 }
5044
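/*
 * Inject an NMI via EVENTINJ.  NMIs stay masked until the guest executes
 * IRET, so record the mask and intercept IRET to learn when the NMI
 * window reopens.
 */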
5045 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
5046 {
5047         struct vcpu_svm *svm = to_svm(vcpu);
5048
5049         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
5050         vcpu->arch.hflags |= HF_NMI_MASK;
5051         set_intercept(svm, INTERCEPT_IRET);
5052         ++vcpu->stat.nmi_injections;
5053 }
5054
5055 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
5056 {
5057         struct vmcb_control_area *control;
5058
5059         /* The following fields are ignored when AVIC is enabled */
5060         control = &svm->vmcb->control;
5061         control->int_vector = irq;
5062         control->int_ctl &= ~V_INTR_PRIO_MASK;
5063         control->int_ctl |= V_IRQ_MASK |
5064                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
5065         mark_dirty(svm->vmcb, VMCB_INTR);
5066 }
5067
5068 static void svm_set_irq(struct kvm_vcpu *vcpu)
5069 {
5070         struct vcpu_svm *svm = to_svm(vcpu);
5071
5072         BUG_ON(!(gif_set(svm)));
5073
5074         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
5075         ++vcpu->stat.irq_injections;
5076
5077         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
5078                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
5079 }
5080
5081 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
5082 {
5083         return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
5084 }
5085
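/*
 * Intercept CR8 writes only while the TPR blocks the highest pending
 * interrupt (tpr >= irr), so that KVM notices when the guest lowers its
 * TPR far enough to unblock delivery.
 */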
5086 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
5087 {
5088         struct vcpu_svm *svm = to_svm(vcpu);
5089
5090         if (svm_nested_virtualize_tpr(vcpu) ||
5091             kvm_vcpu_apicv_active(vcpu))
5092                 return;
5093
5094         clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5095
5096         if (irr == -1)
5097                 return;
5098
5099         if (tpr >= irr)
5100                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5101 }
5102
5103 static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
5104 {
5105         return;
5106 }
5107
5108 static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
5109 {
5110         return avic && irqchip_split(vcpu->kvm);
5111 }
5112
5113 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
5114 {
5115 }
5116
5117 static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
5118 {
5119 }
5120
5121 /* Note: Currently only used by Hyper-V. */
5122 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
5123 {
5124         struct vcpu_svm *svm = to_svm(vcpu);
5125         struct vmcb *vmcb = svm->vmcb;
5126
5127         if (!kvm_vcpu_apicv_active(&svm->vcpu))
5128                 return;
5129
5130         vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
5131         mark_dirty(vmcb, VMCB_INTR);
5132 }
5133
5134 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
5135 {
5136         return;
5137 }
5138
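/*
 * Post an interrupt to a vCPU through AVIC: set the IRR bit in the
 * backing page, then ring the doorbell if the target is running, or
 * wake it up so it notices the pending interrupt.
 */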
5139 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
5140 {
5141         kvm_lapic_set_irr(vec, vcpu->arch.apic);
5142         smp_mb__after_atomic();
5143
5144         if (avic_vcpu_is_running(vcpu))
5145                 wrmsrl(SVM_AVIC_DOORBELL,
5146                        kvm_cpu_get_apicid(vcpu->cpu));
5147         else
5148                 kvm_vcpu_wake_up(vcpu);
5149 }
5150
5151 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5152 {
5153         unsigned long flags;
5154         struct amd_svm_iommu_ir *cur;
5155
5156         spin_lock_irqsave(&svm->ir_list_lock, flags);
5157         list_for_each_entry(cur, &svm->ir_list, node) {
5158                 if (cur->data != pi->ir_data)
5159                         continue;
5160                 list_del(&cur->node);
5161                 kfree(cur);
5162                 break;
5163         }
5164         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5165 }
5166
5167 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5168 {
5169         int ret = 0;
5170         unsigned long flags;
5171         struct amd_svm_iommu_ir *ir;
5172
5173         /**
5174          * In some cases, the existing IRTE is updated and re-set,
5175          * so we need to check here if it has already been added
5176          * to the ir_list.
5177          */
5178         if (pi->ir_data && (pi->prev_ga_tag != 0)) {
5179                 struct kvm *kvm = svm->vcpu.kvm;
5180                 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
5181                 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
5182                 struct vcpu_svm *prev_svm;
5183
5184                 if (!prev_vcpu) {
5185                         ret = -EINVAL;
5186                         goto out;
5187                 }
5188
5189                 prev_svm = to_svm(prev_vcpu);
5190                 svm_ir_list_del(prev_svm, pi);
5191         }
5192
5193         /**
5194          * Allocate a new amd_svm_iommu_ir entry, which is then
5195          * added to the per-vcpu ir_list.
5196          */
5197         ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
5198         if (!ir) {
5199                 ret = -ENOMEM;
5200                 goto out;
5201         }
5202         ir->data = pi->ir_data;
5203
5204         spin_lock_irqsave(&svm->ir_list_lock, flags);
5205         list_add(&ir->node, &svm->ir_list);
5206         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5207 out:
5208         return ret;
5209 }
5210
5211 /**
5212  * Note:
5213  * The HW cannot support posting multicast/broadcast
5214  * interrupts to a vCPU. So, we still use legacy interrupt
5215  * remapping for these kinds of interrupts.
5216  *
5217  * For lowest-priority interrupts, we only support
5218  * those with a single CPU as the destination, e.g. the user
5219  * configures the interrupt via /proc/irq or uses
5220  * irqbalance to make it single-CPU.
5221  */
5222 static int
5223 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
5224                  struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
5225 {
5226         struct kvm_lapic_irq irq;
5227         struct kvm_vcpu *vcpu = NULL;
5228
5229         kvm_set_msi_irq(kvm, e, &irq);
5230
5231         if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
5232                 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
5233                          __func__, irq.vector);
5234                 return -1;
5235         }
5236
5237         pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
5238                  irq.vector);
5239         *svm = to_svm(vcpu);
5240         vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
5241         vcpu_info->vector = irq.vector;
5242
5243         return 0;
5244 }
5245
5246 /*
5247  * svm_update_pi_irte - set IRTE for Posted-Interrupts
5248  *
5249  * @kvm: kvm
5250  * @host_irq: host irq of the interrupt
5251  * @guest_irq: gsi of the interrupt
5252  * @set: set or unset PI
5253  * returns 0 on success, < 0 on failure
5254  */
5255 static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
5256                               uint32_t guest_irq, bool set)
5257 {
5258         struct kvm_kernel_irq_routing_entry *e;
5259         struct kvm_irq_routing_table *irq_rt;
5260         int idx, ret = -EINVAL;
5261
5262         if (!kvm_arch_has_assigned_device(kvm) ||
5263             !irq_remapping_cap(IRQ_POSTING_CAP))
5264                 return 0;
5265
5266         pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
5267                  __func__, host_irq, guest_irq, set);
5268
5269         idx = srcu_read_lock(&kvm->irq_srcu);
5270         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
5271         WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
5272
5273         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
5274                 struct vcpu_data vcpu_info;
5275                 struct vcpu_svm *svm = NULL;
5276
5277                 if (e->type != KVM_IRQ_ROUTING_MSI)
5278                         continue;
5279
5280                 /**
5281                  * Here, we fall back to legacy mode in the following cases:
5282                  * 1. The interrupt cannot be targeted at a specific vcpu.
5283                  * 2. The posted interrupt is being unset.
5284                  * 3. APIC virtualization is disabled for the vcpu.
5285                  */
5286                 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
5287                     kvm_vcpu_apicv_active(&svm->vcpu)) {
5288                         struct amd_iommu_pi_data pi;
5289
5290                         /* Try to enable guest_mode in IRTE */
5291                         pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
5292                                             AVIC_HPA_MASK);
5293                         pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
5294                                                      svm->vcpu.vcpu_id);
5295                         pi.is_guest_mode = true;
5296                         pi.vcpu_data = &vcpu_info;
5297                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5298
5299                         /**
5300                          * Here, we have successfully set up vcpu affinity in
5301                          * IOMMU guest mode. Now, we need to store the posted
5302                          * interrupt information in a per-vcpu ir_list so that
5303                          * we can reference it directly when we update the vcpu
5304                          * scheduling information in the IOMMU IRTE.
5305                          */
5306                         if (!ret && pi.is_guest_mode)
5307                                 svm_ir_list_add(svm, &pi);
5308                 } else {
5309                         /* Use legacy mode in IRTE */
5310                         struct amd_iommu_pi_data pi;
5311
5312                         /**
5313                          * Here, pi is used to:
5314                          * - Tell the IOMMU to use legacy mode for this interrupt.
5315                          * - Retrieve the ga_tag of the prior interrupt remapping data.
5316                          */
5317                         pi.is_guest_mode = false;
5318                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5319
5320                         /**
5321                          * Check if the posted interrupt was previously
5322                          * set up in guest_mode by checking if the ga_tag
5323                          * was cached. If so, we need to clean up the per-vcpu
5324                          * ir_list.
5325                          */
5326                         if (!ret && pi.prev_ga_tag) {
5327                                 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
5328                                 struct kvm_vcpu *vcpu;
5329
5330                                 vcpu = kvm_get_vcpu_by_id(kvm, id);
5331                                 if (vcpu)
5332                                         svm_ir_list_del(to_svm(vcpu), &pi);
5333                         }
5334                 }
5335
5336                 if (!ret && svm) {
5337                         trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
5338                                                  e->gsi, vcpu_info.vector,
5339                                                  vcpu_info.pi_desc_addr, set);
5340                 }
5341
5342                 if (ret < 0) {
5343                         pr_err("%s: failed to update PI IRTE\n", __func__);
5344                         goto out;
5345                 }
5346         }
5347
5348         ret = 0;
5349 out:
5350         srcu_read_unlock(&kvm->irq_srcu, idx);
5351         return ret;
5352 }
5353
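/*
 * An NMI may be injected only if the vcpu is outside an interrupt
 * shadow, NMIs are not masked, GIF is set, and any nested-NMI
 * constraints (nested_svm_nmi()) are satisfied.
 */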
5354 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
5355 {
5356         struct vcpu_svm *svm = to_svm(vcpu);
5357         struct vmcb *vmcb = svm->vmcb;
5358         int ret;
5359         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5360               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5361         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5362
5363         return ret;
5364 }
5365
5366 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5367 {
5368         struct vcpu_svm *svm = to_svm(vcpu);
5369
5370         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5371 }
5372
5373 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5374 {
5375         struct vcpu_svm *svm = to_svm(vcpu);
5376
5377         if (masked) {
5378                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
5379                 set_intercept(svm, INTERCEPT_IRET);
5380         } else {
5381                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
5382                 clr_intercept(svm, INTERCEPT_IRET);
5383         }
5384 }
5385
5386 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5387 {
5388         struct vcpu_svm *svm = to_svm(vcpu);
5389         struct vmcb *vmcb = svm->vmcb;
5390         int ret;
5391
5392         if (!gif_set(svm) ||
5393              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5394                 return 0;
5395
5396         ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
5397
5398         if (is_guest_mode(vcpu))
5399                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5400
5401         return ret;
5402 }
5403
5404 static void enable_irq_window(struct kvm_vcpu *vcpu)
5405 {
5406         struct vcpu_svm *svm = to_svm(vcpu);
5407
5408         if (kvm_vcpu_apicv_active(vcpu))
5409                 return;
5410
5411         /*
5412          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5413          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
5414          * get that intercept, this function will be called again though and
5415          * we'll get the vintr intercept. However, if the vGIF feature is
5416          * enabled, the STGI interception will not occur. Enable the irq
5417          * window under the assumption that the hardware will set the GIF.
5418          */
5419         if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
5420                 svm_set_vintr(svm);
5421                 svm_inject_irq(svm, 0x0);
5422         }
5423 }
5424
5425 static void enable_nmi_window(struct kvm_vcpu *vcpu)
5426 {
5427         struct vcpu_svm *svm = to_svm(vcpu);
5428
5429         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5430             == HF_NMI_MASK)
5431                 return; /* IRET will cause a vm exit */
5432
5433         if (!gif_set(svm)) {
5434                 if (vgif_enabled(svm))
5435                         set_intercept(svm, INTERCEPT_STGI);
5436                 return; /* STGI will cause a vm exit */
5437         }
5438
5439         if (svm->nested.exit_required)
5440                 return; /* we're not going to run the guest yet */
5441
5442         /*
5443          * Something prevents the NMI from being injected. Single-step over the
5444          * possible blocker (IRET, exception injection or interrupt shadow).
5445          */
5446         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
5447         svm->nmi_singlestep = true;
5448         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
5449 }
5450
5451 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5452 {
5453         return 0;
5454 }
5455
5456 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5457 {
5458         return 0;
5459 }
5460
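/*
 * Flush this guest's TLB entries: use a flush-by-ASID on CPUs that
 * support it, otherwise retire the current ASID so a fresh one is
 * assigned on the next VMRUN.
 */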
5461 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5462 {
5463         struct vcpu_svm *svm = to_svm(vcpu);
5464
5465         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5466                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5467         else
5468                 svm->asid_generation--;
5469 }
5470
5471 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
5472 {
5473         struct vcpu_svm *svm = to_svm(vcpu);
5474
5475         invlpga(gva, svm->vmcb->control.asid);
5476 }
5477
5478 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5479 {
5480 }
5481
5482 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5483 {
5484         struct vcpu_svm *svm = to_svm(vcpu);
5485
5486         if (svm_nested_virtualize_tpr(vcpu))
5487                 return;
5488
5489         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
5490                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
5491                 kvm_set_cr8(vcpu, cr8);
5492         }
5493 }
5494
5495 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5496 {
5497         struct vcpu_svm *svm = to_svm(vcpu);
5498         u64 cr8;
5499
5500         if (svm_nested_virtualize_tpr(vcpu) ||
5501             kvm_vcpu_apicv_active(vcpu))
5502                 return;
5503
5504         cr8 = kvm_get_cr8(vcpu);
5505         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5506         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5507 }
5508
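/*
 * Re-queue whatever event was pending at #VMEXIT (EXITINTINFO) so it is
 * delivered on the next VMRUN.  Software exceptions such as an emulated
 * INT3 are re-executed rather than re-injected.
 */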
5509 static void svm_complete_interrupts(struct vcpu_svm *svm)
5510 {
5511         u8 vector;
5512         int type;
5513         u32 exitintinfo = svm->vmcb->control.exit_int_info;
5514         unsigned int3_injected = svm->int3_injected;
5515
5516         svm->int3_injected = 0;
5517
5518         /*
5519          * If we've made progress since setting HF_IRET_MASK, we've
5520          * executed an IRET and can allow NMI injection.
5521          */
5522         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5523             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
5524                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
5525                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5526         }
5527
5528         svm->vcpu.arch.nmi_injected = false;
5529         kvm_clear_exception_queue(&svm->vcpu);
5530         kvm_clear_interrupt_queue(&svm->vcpu);
5531
5532         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5533                 return;
5534
5535         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5536
5537         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5538         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5539
5540         switch (type) {
5541         case SVM_EXITINTINFO_TYPE_NMI:
5542                 svm->vcpu.arch.nmi_injected = true;
5543                 break;
5544         case SVM_EXITINTINFO_TYPE_EXEPT:
5545                 /*
5546                  * In case of software exceptions, do not reinject the vector,
5547                  * but re-execute the instruction instead. Rewind RIP first
5548                  * if we emulated INT3 before.
5549                  */
5550                 if (kvm_exception_is_soft(vector)) {
5551                         if (vector == BP_VECTOR && int3_injected &&
5552                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5553                                 kvm_rip_write(&svm->vcpu,
5554                                               kvm_rip_read(&svm->vcpu) -
5555                                               int3_injected);
5556                         break;
5557                 }
5558                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5559                         u32 err = svm->vmcb->control.exit_int_info_err;
5560                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
5561
5562                 } else
5563                         kvm_requeue_exception(&svm->vcpu, vector);
5564                 break;
5565         case SVM_EXITINTINFO_TYPE_INTR:
5566                 kvm_queue_interrupt(&svm->vcpu, vector, false);
5567                 break;
5568         default:
5569                 break;
5570         }
5571 }
5572
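/*
 * A queued injection is being abandoned (the vmentry will not happen):
 * move EVENTINJ back into EXITINTINFO and let svm_complete_interrupts()
 * re-queue the event.
 */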
5573 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5574 {
5575         struct vcpu_svm *svm = to_svm(vcpu);
5576         struct vmcb_control_area *control = &svm->vmcb->control;
5577
5578         control->exit_int_info = control->event_inj;
5579         control->exit_int_info_err = control->event_inj_err;
5580         control->event_inj = 0;
5581         svm_complete_interrupts(svm);
5582 }
5583
5584 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5585 {
5586         struct vcpu_svm *svm = to_svm(vcpu);
5587
5588         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5589         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5590         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5591
5592         /*
5593          * A vmexit emulation is required before the vcpu can be executed
5594          * again.
5595          */
5596         if (unlikely(svm->nested.exit_required))
5597                 return;
5598
5599         /*
5600          * Disable singlestep if we're injecting an interrupt/exception.
5601          * We don't want our modified rflags to be pushed on the stack where
5602          * we might not be able to easily reset them if we disable NMI
5603          * singlestep later.
5604          */
5605         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5606                 /*
5607                  * Event injection happens before external interrupts cause a
5608                  * vmexit and interrupts are disabled here, so smp_send_reschedule
5609                  * is enough to force an immediate vmexit.
5610                  */
5611                 disable_nmi_singlestep(svm);
5612                 smp_send_reschedule(vcpu->cpu);
5613         }
5614
5615         pre_svm_run(svm);
5616
5617         sync_lapic_to_cr8(vcpu);
5618
5619         svm->vmcb->save.cr2 = vcpu->arch.cr2;
5620
5621         clgi();
5622
5623         /*
5624          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
5625          * it's non-zero. Since vmentry is serialising on affected CPUs, there
5626          * is no need to worry about the conditional branch over the wrmsr
5627          * being speculatively taken.
5628          */
5629         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
5630
5631         local_irq_enable();
5632
5633         asm volatile (
5634                 "push %%" _ASM_BP "; \n\t"
5635                 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5636                 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5637                 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5638                 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5639                 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5640                 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
5641 #ifdef CONFIG_X86_64
5642                 "mov %c[r8](%[svm]),  %%r8  \n\t"
5643                 "mov %c[r9](%[svm]),  %%r9  \n\t"
5644                 "mov %c[r10](%[svm]), %%r10 \n\t"
5645                 "mov %c[r11](%[svm]), %%r11 \n\t"
5646                 "mov %c[r12](%[svm]), %%r12 \n\t"
5647                 "mov %c[r13](%[svm]), %%r13 \n\t"
5648                 "mov %c[r14](%[svm]), %%r14 \n\t"
5649                 "mov %c[r15](%[svm]), %%r15 \n\t"
5650 #endif
5651
5652                 /* Enter guest mode */
5653                 "push %%" _ASM_AX " \n\t"
5654                 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
5655                 __ex(SVM_VMLOAD) "\n\t"
5656                 __ex(SVM_VMRUN) "\n\t"
5657                 __ex(SVM_VMSAVE) "\n\t"
5658                 "pop %%" _ASM_AX " \n\t"
5659
5660                 /* Save guest registers, load host registers */
5661                 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5662                 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5663                 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5664                 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5665                 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5666                 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
5667 #ifdef CONFIG_X86_64
5668                 "mov %%r8,  %c[r8](%[svm]) \n\t"
5669                 "mov %%r9,  %c[r9](%[svm]) \n\t"
5670                 "mov %%r10, %c[r10](%[svm]) \n\t"
5671                 "mov %%r11, %c[r11](%[svm]) \n\t"
5672                 "mov %%r12, %c[r12](%[svm]) \n\t"
5673                 "mov %%r13, %c[r13](%[svm]) \n\t"
5674                 "mov %%r14, %c[r14](%[svm]) \n\t"
5675                 "mov %%r15, %c[r15](%[svm]) \n\t"
5676                 /*
5677                  * Clear host registers marked as clobbered to prevent
5678                  * speculative use.
5679                  */
5680                 "xor %%r8d, %%r8d \n\t"
5681                 "xor %%r9d, %%r9d \n\t"
5682                 "xor %%r10d, %%r10d \n\t"
5683                 "xor %%r11d, %%r11d \n\t"
5684                 "xor %%r12d, %%r12d \n\t"
5685                 "xor %%r13d, %%r13d \n\t"
5686                 "xor %%r14d, %%r14d \n\t"
5687                 "xor %%r15d, %%r15d \n\t"
5688 #endif
5689                 "xor %%ebx, %%ebx \n\t"
5690                 "xor %%ecx, %%ecx \n\t"
5691                 "xor %%edx, %%edx \n\t"
5692                 "xor %%esi, %%esi \n\t"
5693                 "xor %%edi, %%edi \n\t"
5694                 "pop %%" _ASM_BP
5695                 :
5696                 : [svm]"a"(svm),
5697                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
5698                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5699                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5700                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5701                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5702                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5703                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
5704 #ifdef CONFIG_X86_64
5705                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5706                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5707                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5708                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5709                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5710                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5711                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5712                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
5713 #endif
5714                 : "cc", "memory"
5715 #ifdef CONFIG_X86_64
5716                 , "rbx", "rcx", "rdx", "rsi", "rdi"
5717                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
5718 #else
5719                 , "ebx", "ecx", "edx", "esi", "edi"
5720 #endif
5721                 );
5722
5723         /* Eliminate branch target predictions from guest mode */
5724         vmexit_fill_RSB();
5725
5726 #ifdef CONFIG_X86_64
5727         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5728 #else
5729         loadsegment(fs, svm->host.fs);
5730 #ifndef CONFIG_X86_32_LAZY_GS
5731         loadsegment(gs, svm->host.gs);
5732 #endif
5733 #endif
5734
5735         /*
5736          * We do not use IBRS in the kernel. If this vCPU has used the
5737          * SPEC_CTRL MSR it may have left it on; save the value and
5738          * turn it off. This is much more efficient than blindly adding
5739          * it to the atomic save/restore list. Especially as the former
5740          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
5741          *
5742          * For non-nested case:
5743          * If the L01 MSR bitmap does not intercept the MSR, then we need to
5744          * save it.
5745          *
5746          * For nested case:
5747          * If the L02 MSR bitmap does not intercept the MSR, then we need to
5748          * save it.
5749          */
5750         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
5751                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
5752
5753         reload_tss(vcpu);
5754
5755         local_irq_disable();
5756
5757         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
5758
5759         vcpu->arch.cr2 = svm->vmcb->save.cr2;
5760         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5761         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5762         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5763
5764         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5765                 kvm_before_interrupt(&svm->vcpu);
5766
5767         stgi();
5768
5769         /* Any pending NMI will happen here */
5770
5771         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5772                 kvm_after_interrupt(&svm->vcpu);
5773
5774         sync_cr8_to_lapic(vcpu);
5775
5776         svm->next_rip = 0;
5777
5778         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5779
5780         /* if exit due to PF check for async PF */
5781         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
5782                 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
5783
5784         if (npt_enabled) {
5785                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5786                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5787         }
5788
5789         /*
5790          * We need to handle MC intercepts here before the vcpu has a chance to
5791          * change the physical cpu
5792          */
5793         if (unlikely(svm->vmcb->control.exit_code ==
5794                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
5795                 svm_handle_mce(svm);
5796
5797         mark_all_clean(svm->vmcb);
5798 }
5799 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
5800
5801 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5802 {
5803         struct vcpu_svm *svm = to_svm(vcpu);
5804
5805         svm->vmcb->save.cr3 = __sme_set(root);
5806         mark_dirty(svm->vmcb, VMCB_CR);
5807 }
5808
5809 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5810 {
5811         struct vcpu_svm *svm = to_svm(vcpu);
5812
5813         svm->vmcb->control.nested_cr3 = __sme_set(root);
5814         mark_dirty(svm->vmcb, VMCB_NPT);
5815
5816         /* Also sync guest cr3 here in case we live migrate */
5817         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
5818         mark_dirty(svm->vmcb, VMCB_CR);
5819 }
5820
5821 static int is_disabled(void)
5822 {
5823         u64 vm_cr;
5824
5825         rdmsrl(MSR_VM_CR, vm_cr);
5826         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5827                 return 1;
5828
5829         return 0;
5830 }
5831
5832 static void
5833 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5834 {
5835         /*
5836          * Patch in the VMMCALL instruction:
5837          */
5838         hypercall[0] = 0x0f;
5839         hypercall[1] = 0x01;
5840         hypercall[2] = 0xd9;
5841 }
5842
5843 static void svm_check_processor_compat(void *rtn)
5844 {
5845         *(int *)rtn = 0;
5846 }
5847
5848 static bool svm_cpu_has_accelerated_tpr(void)
5849 {
5850         return false;
5851 }
5852
5853 static bool svm_has_emulated_msr(int index)
5854 {
5855         switch (index) {
5856         case MSR_IA32_MCG_EXT_CTL:
5857                 return false;
5858         default:
5859                 break;
5860         }
5861
5862         return true;
5863 }
5864
5865 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5866 {
5867         return 0;
5868 }
5869
5870 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5871 {
5872         struct vcpu_svm *svm = to_svm(vcpu);
5873
5874         /* Update nrips enabled cache */
5875         svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
5876
5877         if (!kvm_vcpu_apicv_active(vcpu))
5878                 return;
5879
5880         guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
5881 }
5882
5883 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5884 {
5885         switch (func) {
5886         case 0x1:
5887                 if (avic)
5888                         entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5889                 break;
5890         case 0x80000001:
5891                 if (nested)
5892                         entry->ecx |= (1 << 2); /* Set SVM bit */
5893                 break;
5894         case 0x8000000A:
5895                 entry->eax = 1; /* SVM revision 1 */
5896                 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5897                                    ASID emulation to nested SVM */
5898                 entry->ecx = 0; /* Reserved */
5899                 entry->edx = 0; /* By default do not support any
5900                                    additional features */
5901
5902                 /* Support next_rip if host supports it */
5903                 if (boot_cpu_has(X86_FEATURE_NRIPS))
5904                         entry->edx |= SVM_FEATURE_NRIP;
5905
5906                 /* Support NPT for the guest if enabled */
5907                 if (npt_enabled)
5908                         entry->edx |= SVM_FEATURE_NPT;
5909
5910                 break;
5911         case 0x8000001F:
5912                 /* Support memory encryption cpuid if host supports it */
5913                 if (boot_cpu_has(X86_FEATURE_SEV))
5914                         cpuid(0x8000001f, &entry->eax, &entry->ebx,
5915                                 &entry->ecx, &entry->edx);
5916
5917         }
5918 }
5919
5920 static int svm_get_lpage_level(void)
5921 {
5922         return PT_PDPE_LEVEL;
5923 }
5924
5925 static bool svm_rdtscp_supported(void)
5926 {
5927         return boot_cpu_has(X86_FEATURE_RDTSCP);
5928 }
5929
5930 static bool svm_invpcid_supported(void)
5931 {
5932         return false;
5933 }
5934
5935 static bool svm_mpx_supported(void)
5936 {
5937         return false;
5938 }
5939
5940 static bool svm_xsaves_supported(void)
5941 {
5942         return false;
5943 }
5944
5945 static bool svm_umip_emulated(void)
5946 {
5947         return false;
5948 }
5949
5950 static bool svm_has_wbinvd_exit(void)
5951 {
5952         return true;
5953 }
5954
5955 #define PRE_EX(exit)  { .exit_code = (exit), \
5956                         .stage = X86_ICPT_PRE_EXCEPT, }
5957 #define POST_EX(exit) { .exit_code = (exit), \
5958                         .stage = X86_ICPT_POST_EXCEPT, }
5959 #define POST_MEM(exit) { .exit_code = (exit), \
5960                         .stage = X86_ICPT_POST_MEMACCESS, }
5961
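/*
 * Map from the emulator's generic x86 intercept IDs to the SVM exit
 * code L1 would observe, plus the emulation stage at which the
 * intercept check applies.
 */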
5962 static const struct __x86_intercept {
5963         u32 exit_code;
5964         enum x86_intercept_stage stage;
5965 } x86_intercept_map[] = {
5966         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
5967         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
5968         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
5969         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
5970         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
5971         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
5972         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
5973         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
5974         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
5975         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
5976         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
5977         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
5978         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
5979         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
5980         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
5981         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
5982         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
5983         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
5984         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
5985         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
5986         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
5987         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
5988         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
5989         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
5990         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
5991         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
5992         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
5993         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
5994         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
5995         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
5996         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
5997         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
5998         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
5999         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
6000         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
6001         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
6002         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
6003         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
6004         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
6005         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
6006         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
6007         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
6008         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
6009         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
6010         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
6011         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
6012 };
6013
6014 #undef PRE_EX
6015 #undef POST_EX
6016 #undef POST_MEM
6017
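     /*
      * Check whether an instruction that x86.c is about to emulate would be
      * intercepted by the nested (L1) hypervisor; if so, synthesize the
      * corresponding #VMEXIT and tell the emulator to stop.
      */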
6018 static int svm_check_intercept(struct kvm_vcpu *vcpu,
6019                                struct x86_instruction_info *info,
6020                                enum x86_intercept_stage stage)
6021 {
6022         struct vcpu_svm *svm = to_svm(vcpu);
6023         int vmexit, ret = X86EMUL_CONTINUE;
6024         struct __x86_intercept icpt_info;
6025         struct vmcb *vmcb = svm->vmcb;
6026
6027         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
6028                 goto out;
6029
6030         icpt_info = x86_intercept_map[info->intercept];
6031
6032         if (stage != icpt_info.stage)
6033                 goto out;
6034
6035         switch (icpt_info.exit_code) {
6036         case SVM_EXIT_READ_CR0:
6037                 if (info->intercept == x86_intercept_cr_read)
6038                         icpt_info.exit_code += info->modrm_reg;
6039                 break;
6040         case SVM_EXIT_WRITE_CR0: {
6041                 unsigned long cr0, val;
6042                 u64 intercept;
6043
6044                 if (info->intercept == x86_intercept_cr_write)
6045                         icpt_info.exit_code += info->modrm_reg;
6046
6047                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
6048                     info->intercept == x86_intercept_clts)
6049                         break;
6050
6051                 intercept = svm->nested.intercept;
6052
6053                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
6054                         break;
6055
6056                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
6057                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
6058
6059                 if (info->intercept == x86_intercept_lmsw) {
6060                         cr0 &= 0xfUL;
6061                         val &= 0xfUL;
6062                         /* lmsw can't clear PE - catch this here */
6063                         if (cr0 & X86_CR0_PE)
6064                                 val |= X86_CR0_PE;
6065                 }
6066
6067                 if (cr0 ^ val)
6068                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
6069
6070                 break;
6071         }
6072         case SVM_EXIT_READ_DR0:
6073         case SVM_EXIT_WRITE_DR0:
6074                 icpt_info.exit_code += info->modrm_reg;
6075                 break;
6076         case SVM_EXIT_MSR:
6077                 if (info->intercept == x86_intercept_wrmsr)
6078                         vmcb->control.exit_info_1 = 1;
6079                 else
6080                         vmcb->control.exit_info_1 = 0;
6081                 break;
6082         case SVM_EXIT_PAUSE:
6083                 /*
6084                  * We only get here for NOP, but PAUSE is REP NOP,
6085                  * so check for the REP prefix here.
6086                  */
6087                 if (info->rep_prefix != REPE_PREFIX)
6088                         goto out;
6089                 break;
6090         case SVM_EXIT_IOIO: {
6091                 u64 exit_info;
6092                 u32 bytes;
6093
6094                 if (info->intercept == x86_intercept_in ||
6095                     info->intercept == x86_intercept_ins) {
6096                         exit_info = ((info->src_val & 0xffff) << 16) |
6097                                 SVM_IOIO_TYPE_MASK;
6098                         bytes = info->dst_bytes;
6099                 } else {
6100                         exit_info = (info->dst_val & 0xffff) << 16;
6101                         bytes = info->src_bytes;
6102                 }
6103
6104                 if (info->intercept == x86_intercept_outs ||
6105                     info->intercept == x86_intercept_ins)
6106                         exit_info |= SVM_IOIO_STR_MASK;
6107
6108                 if (info->rep_prefix)
6109                         exit_info |= SVM_IOIO_REP_MASK;
6110
6111                 bytes = min(bytes, 4u);
6112
6113                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
6114
6115                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
6116
6117                 vmcb->control.exit_info_1 = exit_info;
6118                 vmcb->control.exit_info_2 = info->next_rip;
6119
6120                 break;
6121         }
6122         default:
6123                 break;
6124         }
6125
6126         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
6127         if (static_cpu_has(X86_FEATURE_NRIPS))
6128                 vmcb->control.next_rip  = info->next_rip;
6129         vmcb->control.exit_code = icpt_info.exit_code;
6130         vmexit = nested_svm_exit_handled(svm);
6131
6132         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
6133                                            : X86EMUL_CONTINUE;
6134
6135 out:
6136         return ret;
6137 }
6138
6139 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
6140 {
6141         local_irq_enable();
6142         /*
6143          * Execute at least one instruction with interrupts enabled, so that
6144          * a pending timer interrupt isn't delayed by the interrupt shadow.
6145          */
6146         asm("nop");
6147         local_irq_disable();
6148 }
6149
6150 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
6151 {
6152         if (pause_filter_thresh)
6153                 shrink_ple_window(vcpu);
6154 }
6155
6156 static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
6157 {
6158         if (avic_handle_apic_id_update(vcpu) != 0)
6159                 return;
6160         if (avic_handle_dfr_update(vcpu) != 0)
6161                 return;
6162         avic_handle_ldr_update(vcpu);
6163 }
6164
6165 static void svm_setup_mce(struct kvm_vcpu *vcpu)
6166 {
6167         /* [63:9] are reserved. */
6168         vcpu->arch.mcg_cap &= 0x1ff;
6169 }
6170
6171 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
6172 {
6173         struct vcpu_svm *svm = to_svm(vcpu);
6174
6175         /* Per APM Vol.2 15.22.2 "Response to SMI" */
6176         if (!gif_set(svm))
6177                 return 0;
6178
6179         if (is_guest_mode(&svm->vcpu) &&
6180             svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
6181                 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
6182                 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
6183                 svm->nested.exit_required = true;
6184                 return 0;
6185         }
6186
6187         return 1;
6188 }
6189
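     /*
      * On SMM entry, save the nested-guest state into the SMM state-save
      * area (offsets FED8h and FEE0h, per the APM) and leave guest mode
      * via a nested #VMEXIT.
      */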
6190 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
6191 {
6192         struct vcpu_svm *svm = to_svm(vcpu);
6193         int ret;
6194
6195         if (is_guest_mode(vcpu)) {
6196                 /* FED8h - SVM Guest */
6197                 put_smstate(u64, smstate, 0x7ed8, 1);
6198                 /* FEE0h - SVM Guest VMCB Physical Address */
6199                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
6200
6201                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
6202                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
6203                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
6204
6205                 ret = nested_svm_vmexit(svm);
6206                 if (ret)
6207                         return ret;
6208         }
6209         return 0;
6210 }
6211
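     /*
      * On RSM, re-enter the nested guest if the SMM state-save area
      * indicates that an SMI interrupted guest mode.
      */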
6212 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
6213 {
6214         struct vcpu_svm *svm = to_svm(vcpu);
6215         struct vmcb *nested_vmcb;
6216         struct page *page;
6217         struct {
6218                 u64 guest;
6219                 u64 vmcb;
6220         } svm_state_save;
6221         int ret;
6222
6223         ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
6224                                   sizeof(svm_state_save));
6225         if (ret)
6226                 return ret;
6227
6228         if (svm_state_save.guest) {
6229                 vcpu->arch.hflags &= ~HF_SMM_MASK;
6230                 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
6231                 if (nested_vmcb)
6232                         enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
6233                 else
6234                         ret = 1;
6235                 vcpu->arch.hflags |= HF_SMM_MASK;
6236         }
6237         return ret;
6238 }
6239
6240 static int enable_smi_window(struct kvm_vcpu *vcpu)
6241 {
6242         struct vcpu_svm *svm = to_svm(vcpu);
6243
6244         if (!gif_set(svm)) {
6245                 if (vgif_enabled(svm))
6246                         set_intercept(svm, INTERCEPT_STGI);
6247                 /* STGI will cause a vm exit */
6248                 return 1;
6249         }
6250         return 0;
6251 }
6252
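     /*
      * Allocate a fresh SEV ASID from the global bitmap.  Returns the
      * (1-based) ASID on success, or -EBUSY if all SEV ASIDs are in use.
      */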
6253 static int sev_asid_new(void)
6254 {
6255         int pos;
6256
6257         /*
6258          * An SEV-enabled guest must use an ASID in the range min_sev_asid to max_sev_asid.
6259          */
6260         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
6261         if (pos >= max_sev_asid)
6262                 return -EBUSY;
6263
6264         set_bit(pos, sev_asid_bitmap);
6265         return pos + 1;
6266 }
6267
6268 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6269 {
6270         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6271         int asid, ret;
6272
6273         ret = -EBUSY;
6274         asid = sev_asid_new();
6275         if (asid < 0)
6276                 return ret;
6277
6278         ret = sev_platform_init(&argp->error);
6279         if (ret)
6280                 goto e_free;
6281
6282         sev->active = true;
6283         sev->asid = asid;
6284         INIT_LIST_HEAD(&sev->regions_list);
6285
6286         return 0;
6287
6288 e_free:
6289         __sev_asid_free(asid);
6290         return ret;
6291 }
6292
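     /*
      * Flush CPU caches and issue a firmware DF_FLUSH, then ACTIVATE the
      * guest's ASID against the firmware handle returned by LAUNCH_START.
      */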
6293 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
6294 {
6295         struct sev_data_activate *data;
6296         int asid = sev_get_asid(kvm);
6297         int ret;
6298
6299         wbinvd_on_all_cpus();
6300
6301         ret = sev_guest_df_flush(error);
6302         if (ret)
6303                 return ret;
6304
6305         data = kzalloc(sizeof(*data), GFP_KERNEL);
6306         if (!data)
6307                 return -ENOMEM;
6308
6309         /* activate ASID on the given handle */
6310         data->handle = handle;
6311         data->asid   = asid;
6312         ret = sev_guest_activate(data, error);
6313         kfree(data);
6314
6315         return ret;
6316 }
6317
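     /* Resolve the SEV device fd and issue the command through the PSP driver. */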
6318 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
6319 {
6320         struct fd f;
6321         int ret;
6322
6323         f = fdget(fd);
6324         if (!f.file)
6325                 return -EBADF;
6326
6327         ret = sev_issue_cmd_external_user(f.file, id, data, error);
6328
6329         fdput(f);
6330         return ret;
6331 }
6332
6333 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
6334 {
6335         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6336
6337         return __sev_issue_cmd(sev->fd, id, data, error);
6338 }
6339
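     /*
      * KVM_SEV_LAUNCH_START: create the guest's memory encryption context
      * in the SEV firmware, bind the guest's ASID to the returned handle,
      * and copy the handle back to userspace.
      */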
6340 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
6341 {
6342         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6343         struct sev_data_launch_start *start;
6344         struct kvm_sev_launch_start params;
6345         void *dh_blob, *session_blob;
6346         int *error = &argp->error;
6347         int ret;
6348
6349         if (!sev_guest(kvm))
6350                 return -ENOTTY;
6351
6352         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6353                 return -EFAULT;
6354
6355         start = kzalloc(sizeof(*start), GFP_KERNEL);
6356         if (!start)
6357                 return -ENOMEM;
6358
6359         dh_blob = NULL;
6360         if (params.dh_uaddr) {
6361                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
6362                 if (IS_ERR(dh_blob)) {
6363                         ret = PTR_ERR(dh_blob);
6364                         goto e_free;
6365                 }
6366
6367                 start->dh_cert_address = __sme_set(__pa(dh_blob));
6368                 start->dh_cert_len = params.dh_len;
6369         }
6370
6371         session_blob = NULL;
6372         if (params.session_uaddr) {
6373                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
6374                 if (IS_ERR(session_blob)) {
6375                         ret = PTR_ERR(session_blob);
6376                         goto e_free_dh;
6377                 }
6378
6379                 start->session_address = __sme_set(__pa(session_blob));
6380                 start->session_len = params.session_len;
6381         }
6382
6383         start->handle = params.handle;
6384         start->policy = params.policy;
6385
6386         /* create memory encryption context */
6387         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
6388         if (ret)
6389                 goto e_free_session;
6390
6391         /* Bind ASID to this guest */
6392         ret = sev_bind_asid(kvm, start->handle, error);
6393         if (ret)
6394                 goto e_free_session;
6395
6396         /* return handle to userspace */
6397         params.handle = start->handle;
6398         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
6399                 sev_unbind_asid(kvm, start->handle);
6400                 ret = -EFAULT;
6401                 goto e_free_session;
6402         }
6403
6404         sev->handle = start->handle;
6405         sev->fd = argp->sev_fd;
6406
6407 e_free_session:
6408         kfree(session_blob);
6409 e_free_dh:
6410         kfree(dh_blob);
6411 e_free:
6412         kfree(start);
6413         return ret;
6414 }
6415
6416 static int get_num_contig_pages(int idx, struct page **inpages,
6417                                 unsigned long npages)
6418 {
6419         unsigned long paddr, next_paddr;
6420         int i = idx + 1, pages = 1;
6421
6422         /* find the number of contiguous pages starting from idx */
6423         paddr = __sme_page_pa(inpages[idx]);
6424         while (i < npages) {
6425                 next_paddr = __sme_page_pa(inpages[i++]);
6426                 if ((paddr + PAGE_SIZE) == next_paddr) {
6427                         pages++;
6428                         paddr = next_paddr;
6429                         continue;
6430                 }
6431                 break;
6432         }
6433
6434         return pages;
6435 }
6436
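     /*
      * KVM_SEV_LAUNCH_UPDATE_DATA: pin the userspace buffer and encrypt
      * its content in place, one physically contiguous chunk at a time.
      */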
6437 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6438 {
6439         unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
6440         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6441         struct kvm_sev_launch_update_data params;
6442         struct sev_data_launch_update_data *data;
6443         struct page **inpages;
6444         int i, ret, pages;
6445
6446         if (!sev_guest(kvm))
6447                 return -ENOTTY;
6448
6449         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6450                 return -EFAULT;
6451
6452         data = kzalloc(sizeof(*data), GFP_KERNEL);
6453         if (!data)
6454                 return -ENOMEM;
6455
6456         vaddr = params.uaddr;
6457         size = params.len;
6458         vaddr_end = vaddr + size;
6459
6460         /* Lock the user memory. */
6461         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6462         if (!inpages) {
6463                 ret = -ENOMEM;
6464                 goto e_free;
6465         }
6466
6467         /*
6468          * The LAUNCH_UPDATE command will perform in-place encryption of the
6469          * memory content (i.e. it will write the same memory region with C=1).
6470          * It's possible that the cache may contain the data with C=0, i.e.,
6471          * unencrypted, so invalidate it first.
6472          */
6473         sev_clflush_pages(inpages, npages);
6474
6475         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6476                 int offset, len;
6477
6478                 /*
6479                  * If the user buffer is not page-aligned, calculate the offset
6480                  * within the page.
6481                  */
6482                 offset = vaddr & (PAGE_SIZE - 1);
6483
6484                 /* Calculate the number of pages that can be encrypted in one go. */
6485                 pages = get_num_contig_pages(i, inpages, npages);
6486
6487                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6488
6489                 data->handle = sev->handle;
6490                 data->len = len;
6491                 data->address = __sme_page_pa(inpages[i]) + offset;
6492                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6493                 if (ret)
6494                         goto e_unpin;
6495
6496                 size -= len;
6497                 next_vaddr = vaddr + len;
6498         }
6499
6500 e_unpin:
6501         /* the memory content was updated, so mark the pages dirty */
6502         for (i = 0; i < npages; i++) {
6503                 set_page_dirty_lock(inpages[i]);
6504                 mark_page_accessed(inpages[i]);
6505         }
6506         /* unlock the user pages */
6507         sev_unpin_memory(kvm, inpages, npages);
6508 e_free:
6509         kfree(data);
6510         return ret;
6511 }
6512
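     /*
      * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement of the
      * guest.  A zero params.len only queries the required blob length.
      */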
6513 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6514 {
6515         void __user *measure = (void __user *)(uintptr_t)argp->data;
6516         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6517         struct sev_data_launch_measure *data;
6518         struct kvm_sev_launch_measure params;
6519         void __user *p = NULL;
6520         void *blob = NULL;
6521         int ret;
6522
6523         if (!sev_guest(kvm))
6524                 return -ENOTTY;
6525
6526         if (copy_from_user(&params, measure, sizeof(params)))
6527                 return -EFAULT;
6528
6529         data = kzalloc(sizeof(*data), GFP_KERNEL);
6530         if (!data)
6531                 return -ENOMEM;
6532
6533         /* User wants to query the blob length */
6534         if (!params.len)
6535                 goto cmd;
6536
6537         p = (void __user *)(uintptr_t)params.uaddr;
6538         if (p) {
6539                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6540                         ret = -EINVAL;
6541                         goto e_free;
6542                 }
6543
6544                 ret = -ENOMEM;
6545                 blob = kmalloc(params.len, GFP_KERNEL);
6546                 if (!blob)
6547                         goto e_free;
6548
6549                 data->address = __psp_pa(blob);
6550                 data->len = params.len;
6551         }
6552
6553 cmd:
6554         data->handle = sev->handle;
6555         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6556
6557         /*
6558          * If we only queried the blob length, the FW returned it in data->len.
6559          */
6560         if (!params.len)
6561                 goto done;
6562
6563         if (ret)
6564                 goto e_free_blob;
6565
6566         if (blob) {
6567                 if (copy_to_user(p, blob, params.len))
6568                         ret = -EFAULT;
6569         }
6570
6571 done:
6572         params.len = data->len;
6573         if (copy_to_user(measure, &params, sizeof(params)))
6574                 ret = -EFAULT;
6575 e_free_blob:
6576         kfree(blob);
6577 e_free:
6578         kfree(data);
6579         return ret;
6580 }
6581
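     /* KVM_SEV_LAUNCH_FINISH: tell the firmware that the launch flow is complete. */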
6582 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6583 {
6584         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6585         struct sev_data_launch_finish *data;
6586         int ret;
6587
6588         if (!sev_guest(kvm))
6589                 return -ENOTTY;
6590
6591         data = kzalloc(sizeof(*data), GFP_KERNEL);
6592         if (!data)
6593                 return -ENOMEM;
6594
6595         data->handle = sev->handle;
6596         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6597
6598         kfree(data);
6599         return ret;
6600 }
6601
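     /* KVM_SEV_GUEST_STATUS: report the guest's policy, state and handle. */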
6602 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6603 {
6604         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6605         struct kvm_sev_guest_status params;
6606         struct sev_data_guest_status *data;
6607         int ret;
6608
6609         if (!sev_guest(kvm))
6610                 return -ENOTTY;
6611
6612         data = kzalloc(sizeof(*data), GFP_KERNEL);
6613         if (!data)
6614                 return -ENOMEM;
6615
6616         data->handle = sev->handle;
6617         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6618         if (ret)
6619                 goto e_free;
6620
6621         params.policy = data->policy;
6622         params.state = data->state;
6623         params.handle = data->handle;
6624
6625         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6626                 ret = -EFAULT;
6627 e_free:
6628         kfree(data);
6629         return ret;
6630 }
6631
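     /* Issue a single DBG_ENCRYPT or DBG_DECRYPT command to the firmware. */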
6632 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6633                                unsigned long dst, int size,
6634                                int *error, bool enc)
6635 {
6636         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6637         struct sev_data_dbg *data;
6638         int ret;
6639
6640         data = kzalloc(sizeof(*data), GFP_KERNEL);
6641         if (!data)
6642                 return -ENOMEM;
6643
6644         data->handle = sev->handle;
6645         data->dst_addr = dst;
6646         data->src_addr = src;
6647         data->len = size;
6648
6649         ret = sev_issue_cmd(kvm,
6650                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6651                             data, error);
6652         kfree(data);
6653         return ret;
6654 }
6655
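     /* Widen the source window to 16-byte granularity before decrypting. */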
6656 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6657                              unsigned long dst_paddr, int sz, int *err)
6658 {
6659         int offset;
6660
6661         /*
6662          * It's safe to read more than we were asked for; the caller must
6663          * ensure that the destination has enough space.
6664          */
6665         src_paddr = round_down(src_paddr, 16);
6666         offset = src_paddr & 15;
6667         sz = round_up(sz + offset, 16);
6668
6669         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6670 }
6671
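     /*
      * Decrypt guest memory into a user buffer, bouncing through an
      * intermediate page when the inputs are not 16-byte aligned.
      */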
6672 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6673                                   unsigned long __user dst_uaddr,
6674                                   unsigned long dst_paddr,
6675                                   int size, int *err)
6676 {
6677         struct page *tpage = NULL;
6678         int ret, offset;
6679
6680         /* if the inputs are not 16-byte aligned then use an intermediate buffer */
6681         if (!IS_ALIGNED(dst_paddr, 16) ||
6682             !IS_ALIGNED(paddr,     16) ||
6683             !IS_ALIGNED(size,      16)) {
6684                 tpage = (void *)alloc_page(GFP_KERNEL);
6685                 if (!tpage)
6686                         return -ENOMEM;
6687
6688                 dst_paddr = __sme_page_pa(tpage);
6689         }
6690
6691         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6692         if (ret)
6693                 goto e_free;
6694
6695         if (tpage) {
6696                 offset = paddr & 15;
6697                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6698                                  page_address(tpage) + offset, size))
6699                         ret = -EFAULT;
6700         }
6701
6702 e_free:
6703         if (tpage)
6704                 __free_page(tpage);
6705
6706         return ret;
6707 }
6708
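     /*
      * Encrypt a user buffer into guest memory.  Unaligned sources are
      * staged through an intermediate page; unaligned destinations or
      * lengths require a read-modify-write of the surrounding blocks.
      */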
6709 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6710                                   unsigned long __user vaddr,
6711                                   unsigned long dst_paddr,
6712                                   unsigned long __user dst_vaddr,
6713                                   int size, int *error)
6714 {
6715         struct page *src_tpage = NULL;
6716         struct page *dst_tpage = NULL;
6717         int ret, len = size;
6718
6719         /* If the source buffer is not aligned then use an intermediate buffer */
6720         if (!IS_ALIGNED(vaddr, 16)) {
6721                 src_tpage = alloc_page(GFP_KERNEL);
6722                 if (!src_tpage)
6723                         return -ENOMEM;
6724
6725                 if (copy_from_user(page_address(src_tpage),
6726                                 (void __user *)(uintptr_t)vaddr, size)) {
6727                         __free_page(src_tpage);
6728                         return -EFAULT;
6729                 }
6730
6731                 paddr = __sme_page_pa(src_tpage);
6732         }
6733
6734         /*
6735          *  If the destination buffer or length is not aligned then do a read-modify-write:
6736          *   - decrypt destination in an intermediate buffer
6737          *   - copy the source buffer in an intermediate buffer
6738          *   - use the intermediate buffer as source buffer
6739          */
6740         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6741                 int dst_offset;
6742
6743                 dst_tpage = alloc_page(GFP_KERNEL);
6744                 if (!dst_tpage) {
6745                         ret = -ENOMEM;
6746                         goto e_free;
6747                 }
6748
6749                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6750                                         __sme_page_pa(dst_tpage), size, error);
6751                 if (ret)
6752                         goto e_free;
6753
6754                 /*
6755                  *  If the source is a kernel buffer then use memcpy(),
6756                  *  otherwise use copy_from_user().
6757                  */
6758                 dst_offset = dst_paddr & 15;
6759
6760                 if (src_tpage)
6761                         memcpy(page_address(dst_tpage) + dst_offset,
6762                                page_address(src_tpage), size);
6763                 else {
6764                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
6765                                            (void __user *)(uintptr_t)vaddr, size)) {
6766                                 ret = -EFAULT;
6767                                 goto e_free;
6768                         }
6769                 }
6770
6771                 paddr = __sme_page_pa(dst_tpage);
6772                 dst_paddr = round_down(dst_paddr, 16);
6773                 len = round_up(size, 16);
6774         }
6775
6776         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6777
6778 e_free:
6779         if (src_tpage)
6780                 __free_page(src_tpage);
6781         if (dst_tpage)
6782                 __free_page(dst_tpage);
6783         return ret;
6784 }
6785
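     /*
      * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT:
      * walk the source range page by page, pinning the source and
      * destination pages for each chunk.
      */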
6786 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6787 {
6788         unsigned long vaddr, vaddr_end, next_vaddr;
6789         unsigned long dst_vaddr;
6790         struct page **src_p, **dst_p;
6791         struct kvm_sev_dbg debug;
6792         unsigned long n;
6793         int ret = 0, size;
6794
6795         if (!sev_guest(kvm))
6796                 return -ENOTTY;
6797
6798         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6799                 return -EFAULT;
6800
6801         vaddr = debug.src_uaddr;
6802         size = debug.len;
6803         vaddr_end = vaddr + size;
6804         dst_vaddr = debug.dst_uaddr;
6805
6806         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6807                 int len, s_off, d_off;
6808
6809                 /* lock the userspace source and destination pages */
6810                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6811                 if (!src_p)
6812                         return -EFAULT;
6813
6814                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6815                 if (!dst_p) {
6816                         sev_unpin_memory(kvm, src_p, n);
6817                         return -EFAULT;
6818                 }
6819
6820                 /*
6821                  * The DBG_{DE,EN}CRYPT commands will perform {de,en}cryption of
6822                  * the memory content in place (i.e. they will write the same
6823                  * memory region with C=1). It's possible that the cache may contain
6824                  * the data with C=0, i.e. unencrypted, so invalidate it first.
6825                  */
6826                 sev_clflush_pages(src_p, 1);
6827                 sev_clflush_pages(dst_p, 1);
6828
6829                 /*
6830                  * Since the user buffers may not be page aligned, calculate
6831                  * the offsets within the page.
6832                  */
6833                 s_off = vaddr & ~PAGE_MASK;
6834                 d_off = dst_vaddr & ~PAGE_MASK;
6835                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6836
6837                 if (dec)
6838                         ret = __sev_dbg_decrypt_user(kvm,
6839                                                      __sme_page_pa(src_p[0]) + s_off,
6840                                                      dst_vaddr,
6841                                                      __sme_page_pa(dst_p[0]) + d_off,
6842                                                      len, &argp->error);
6843                 else
6844                         ret = __sev_dbg_encrypt_user(kvm,
6845                                                      __sme_page_pa(src_p[0]) + s_off,
6846                                                      vaddr,
6847                                                      __sme_page_pa(dst_p[0]) + d_off,
6848                                                      dst_vaddr,
6849                                                      len, &argp->error);
6850
6851                 sev_unpin_memory(kvm, src_p, 1);
6852                 sev_unpin_memory(kvm, dst_p, 1);
6853
6854                 if (ret)
6855                         goto err;
6856
6857                 next_vaddr = vaddr + len;
6858                 dst_vaddr = dst_vaddr + len;
6859                 size -= len;
6860         }
6861 err:
6862         return ret;
6863 }
6864
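     /*
      * KVM_SEV_LAUNCH_SECRET: inject a secret into a pinned, physically
      * contiguous region of guest memory during launch.
      */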
6865 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6866 {
6867         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6868         struct sev_data_launch_secret *data;
6869         struct kvm_sev_launch_secret params;
6870         struct page **pages;
6871         void *blob, *hdr;
6872         unsigned long n;
6873         int ret, offset;
6874
6875         if (!sev_guest(kvm))
6876                 return -ENOTTY;
6877
6878         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6879                 return -EFAULT;
6880
6881         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6882         if (!pages)
6883                 return -ENOMEM;
6884
6885         /*
6886          * The secret must be copied into a contiguous memory region, so verify
6887          * that the userspace memory pages are contiguous before issuing the command.
6888          */
6889         if (get_num_contig_pages(0, pages, n) != n) {
6890                 ret = -EINVAL;
6891                 goto e_unpin_memory;
6892         }
6893
6894         ret = -ENOMEM;
6895         data = kzalloc(sizeof(*data), GFP_KERNEL);
6896         if (!data)
6897                 goto e_unpin_memory;
6898
6899         offset = params.guest_uaddr & (PAGE_SIZE - 1);
6900         data->guest_address = __sme_page_pa(pages[0]) + offset;
6901         data->guest_len = params.guest_len;
6902
6903         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6904         if (IS_ERR(blob)) {
6905                 ret = PTR_ERR(blob);
6906                 goto e_free;
6907         }
6908
6909         data->trans_address = __psp_pa(blob);
6910         data->trans_len = params.trans_len;
6911
6912         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6913         if (IS_ERR(hdr)) {
6914                 ret = PTR_ERR(hdr);
6915                 goto e_free_blob;
6916         }
6917         data->hdr_address = __psp_pa(hdr);
6918         data->hdr_len = params.hdr_len;
6919
6920         data->handle = sev->handle;
6921         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6922
6923         kfree(hdr);
6924
6925 e_free_blob:
6926         kfree(blob);
6927 e_free:
6928         kfree(data);
6929 e_unpin_memory:
6930         sev_unpin_memory(kvm, pages, n);
6931         return ret;
6932 }
6933
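     /* Dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl. */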
6934 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6935 {
6936         struct kvm_sev_cmd sev_cmd;
6937         int r;
6938
6939         if (!svm_sev_enabled())
6940                 return -ENOTTY;
6941
6942         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6943                 return -EFAULT;
6944
6945         mutex_lock(&kvm->lock);
6946
6947         switch (sev_cmd.id) {
6948         case KVM_SEV_INIT:
6949                 r = sev_guest_init(kvm, &sev_cmd);
6950                 break;
6951         case KVM_SEV_LAUNCH_START:
6952                 r = sev_launch_start(kvm, &sev_cmd);
6953                 break;
6954         case KVM_SEV_LAUNCH_UPDATE_DATA:
6955                 r = sev_launch_update_data(kvm, &sev_cmd);
6956                 break;
6957         case KVM_SEV_LAUNCH_MEASURE:
6958                 r = sev_launch_measure(kvm, &sev_cmd);
6959                 break;
6960         case KVM_SEV_LAUNCH_FINISH:
6961                 r = sev_launch_finish(kvm, &sev_cmd);
6962                 break;
6963         case KVM_SEV_GUEST_STATUS:
6964                 r = sev_guest_status(kvm, &sev_cmd);
6965                 break;
6966         case KVM_SEV_DBG_DECRYPT:
6967                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6968                 break;
6969         case KVM_SEV_DBG_ENCRYPT:
6970                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6971                 break;
6972         case KVM_SEV_LAUNCH_SECRET:
6973                 r = sev_launch_secret(kvm, &sev_cmd);
6974                 break;
6975         default:
6976                 r = -EINVAL;
6977                 goto out;
6978         }
6979
6980         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
6981                 r = -EFAULT;
6982
6983 out:
6984         mutex_unlock(&kvm->lock);
6985         return r;
6986 }
6987
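     /*
      * Pin a userspace range and track it on the per-VM regions_list so
      * that its pages stay resident for the guest's lifetime.
      */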
6988 static int svm_register_enc_region(struct kvm *kvm,
6989                                    struct kvm_enc_region *range)
6990 {
6991         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6992         struct enc_region *region;
6993         int ret = 0;
6994
6995         if (!sev_guest(kvm))
6996                 return -ENOTTY;
6997
6998         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
6999                 return -EINVAL;
7000
7001         region = kzalloc(sizeof(*region), GFP_KERNEL);
7002         if (!region)
7003                 return -ENOMEM;
7004
7005         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
7006         if (!region->pages) {
7007                 ret = -ENOMEM;
7008                 goto e_free;
7009         }
7010
7011         /*
7012          * The guest may change the memory encryption attribute from C=0 -> C=1
7013          * or vice versa for this memory range. Let's make sure the caches are
7014          * flushed to ensure that guest data gets written into memory with
7015          * the correct C-bit.
7016          */
7017         sev_clflush_pages(region->pages, region->npages);
7018
7019         region->uaddr = range->addr;
7020         region->size = range->size;
7021
7022         mutex_lock(&kvm->lock);
7023         list_add_tail(&region->list, &sev->regions_list);
7024         mutex_unlock(&kvm->lock);
7025
7026         return ret;
7027
7028 e_free:
7029         kfree(region);
7030         return ret;
7031 }
7032
7033 static struct enc_region *
7034 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
7035 {
7036         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7037         struct list_head *head = &sev->regions_list;
7038         struct enc_region *i;
7039
7040         list_for_each_entry(i, head, list) {
7041                 if (i->uaddr == range->addr &&
7042                     i->size == range->size)
7043                         return i;
7044         }
7045
7046         return NULL;
7047 }
7048
7049
7050 static int svm_unregister_enc_region(struct kvm *kvm,
7051                                      struct kvm_enc_region *range)
7052 {
7053         struct enc_region *region;
7054         int ret;
7055
7056         mutex_lock(&kvm->lock);
7057
7058         if (!sev_guest(kvm)) {
7059                 ret = -ENOTTY;
7060                 goto failed;
7061         }
7062
7063         region = find_enc_region(kvm, range);
7064         if (!region) {
7065                 ret = -EINVAL;
7066                 goto failed;
7067         }
7068
7069         __unregister_enc_region_locked(kvm, region);
7070
7071         mutex_unlock(&kvm->lock);
7072         return 0;
7073
7074 failed:
7075         mutex_unlock(&kvm->lock);
7076         return ret;
7077 }
7078
7079 static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
7080 {
7081         /* Not supported */
7082         return 0;
7083 }
7084
7085 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
7086                                    uint16_t *vmcs_version)
7087 {
7088         /* Intel-only feature */
7089         return -ENODEV;
7090 }
7091
7092 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7093         .cpu_has_kvm_support = has_svm,
7094         .disabled_by_bios = is_disabled,
7095         .hardware_setup = svm_hardware_setup,
7096         .hardware_unsetup = svm_hardware_unsetup,
7097         .check_processor_compatibility = svm_check_processor_compat,
7098         .hardware_enable = svm_hardware_enable,
7099         .hardware_disable = svm_hardware_disable,
7100         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
7101         .has_emulated_msr = svm_has_emulated_msr,
7102
7103         .vcpu_create = svm_create_vcpu,
7104         .vcpu_free = svm_free_vcpu,
7105         .vcpu_reset = svm_vcpu_reset,
7106
7107         .vm_alloc = svm_vm_alloc,
7108         .vm_free = svm_vm_free,
7109         .vm_init = avic_vm_init,
7110         .vm_destroy = svm_vm_destroy,
7111
7112         .prepare_guest_switch = svm_prepare_guest_switch,
7113         .vcpu_load = svm_vcpu_load,
7114         .vcpu_put = svm_vcpu_put,
7115         .vcpu_blocking = svm_vcpu_blocking,
7116         .vcpu_unblocking = svm_vcpu_unblocking,
7117
7118         .update_bp_intercept = update_bp_intercept,
7119         .get_msr_feature = svm_get_msr_feature,
7120         .get_msr = svm_get_msr,
7121         .set_msr = svm_set_msr,
7122         .get_segment_base = svm_get_segment_base,
7123         .get_segment = svm_get_segment,
7124         .set_segment = svm_set_segment,
7125         .get_cpl = svm_get_cpl,
7126         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
7127         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
7128         .decache_cr3 = svm_decache_cr3,
7129         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
7130         .set_cr0 = svm_set_cr0,
7131         .set_cr3 = svm_set_cr3,
7132         .set_cr4 = svm_set_cr4,
7133         .set_efer = svm_set_efer,
7134         .get_idt = svm_get_idt,
7135         .set_idt = svm_set_idt,
7136         .get_gdt = svm_get_gdt,
7137         .set_gdt = svm_set_gdt,
7138         .get_dr6 = svm_get_dr6,
7139         .set_dr6 = svm_set_dr6,
7140         .set_dr7 = svm_set_dr7,
7141         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
7142         .cache_reg = svm_cache_reg,
7143         .get_rflags = svm_get_rflags,
7144         .set_rflags = svm_set_rflags,
7145
7146         .tlb_flush = svm_flush_tlb,
7147         .tlb_flush_gva = svm_flush_tlb_gva,
7148
7149         .run = svm_vcpu_run,
7150         .handle_exit = handle_exit,
7151         .skip_emulated_instruction = skip_emulated_instruction,
7152         .set_interrupt_shadow = svm_set_interrupt_shadow,
7153         .get_interrupt_shadow = svm_get_interrupt_shadow,
7154         .patch_hypercall = svm_patch_hypercall,
7155         .set_irq = svm_set_irq,
7156         .set_nmi = svm_inject_nmi,
7157         .queue_exception = svm_queue_exception,
7158         .cancel_injection = svm_cancel_injection,
7159         .interrupt_allowed = svm_interrupt_allowed,
7160         .nmi_allowed = svm_nmi_allowed,
7161         .get_nmi_mask = svm_get_nmi_mask,
7162         .set_nmi_mask = svm_set_nmi_mask,
7163         .enable_nmi_window = enable_nmi_window,
7164         .enable_irq_window = enable_irq_window,
7165         .update_cr8_intercept = update_cr8_intercept,
7166         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
7167         .get_enable_apicv = svm_get_enable_apicv,
7168         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
7169         .load_eoi_exitmap = svm_load_eoi_exitmap,
7170         .hwapic_irr_update = svm_hwapic_irr_update,
7171         .hwapic_isr_update = svm_hwapic_isr_update,
7172         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
7173         .apicv_post_state_restore = avic_post_state_restore,
7174
7175         .set_tss_addr = svm_set_tss_addr,
7176         .set_identity_map_addr = svm_set_identity_map_addr,
7177         .get_tdp_level = get_npt_level,
7178         .get_mt_mask = svm_get_mt_mask,
7179
7180         .get_exit_info = svm_get_exit_info,
7181
7182         .get_lpage_level = svm_get_lpage_level,
7183
7184         .cpuid_update = svm_cpuid_update,
7185
7186         .rdtscp_supported = svm_rdtscp_supported,
7187         .invpcid_supported = svm_invpcid_supported,
7188         .mpx_supported = svm_mpx_supported,
7189         .xsaves_supported = svm_xsaves_supported,
7190         .umip_emulated = svm_umip_emulated,
7191
7192         .set_supported_cpuid = svm_set_supported_cpuid,
7193
7194         .has_wbinvd_exit = svm_has_wbinvd_exit,
7195
7196         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7197         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7198
7199         .set_tdp_cr3 = set_tdp_cr3,
7200
7201         .check_intercept = svm_check_intercept,
7202         .handle_external_intr = svm_handle_external_intr,
7203
7204         .request_immediate_exit = __kvm_request_immediate_exit,
7205
7206         .sched_in = svm_sched_in,
7207
7208         .pmu_ops = &amd_pmu_ops,
7209         .deliver_posted_interrupt = svm_deliver_avic_intr,
7210         .update_pi_irte = svm_update_pi_irte,
7211         .setup_mce = svm_setup_mce,
7212
7213         .smi_allowed = svm_smi_allowed,
7214         .pre_enter_smm = svm_pre_enter_smm,
7215         .pre_leave_smm = svm_pre_leave_smm,
7216         .enable_smi_window = enable_smi_window,
7217
7218         .mem_enc_op = svm_mem_enc_op,
7219         .mem_enc_reg_region = svm_register_enc_region,
7220         .mem_enc_unreg_region = svm_unregister_enc_region,
7221
7222         .nested_enable_evmcs = nested_enable_evmcs,
7223         .nested_get_evmcs_version = nested_get_evmcs_version,
7224 };
7225
7226 static int __init svm_init(void)
7227 {
7228         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
7229                         __alignof__(struct vcpu_svm), THIS_MODULE);
7230 }
7231
7232 static void __exit svm_exit(void)
7233 {
7234         kvm_exit();
7235 }
7236
7237 module_init(svm_init)
7238 module_exit(svm_exit)