arch/x86/kvm/cpuid.c
(linux.git blob at "KVM: x86: expose AVX512_BF16 feature to guest")
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  * cpuid support routines
5  *
6  * derived from arch/x86/kvm/x86.c
7  *
8  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
9  * Copyright IBM Corporation, 2008
10  */
11
12 #include <linux/kvm_host.h>
13 #include <linux/export.h>
14 #include <linux/vmalloc.h>
15 #include <linux/uaccess.h>
16 #include <linux/sched/stat.h>
17
18 #include <asm/processor.h>
19 #include <asm/user.h>
20 #include <asm/fpu/xstate.h>
21 #include "cpuid.h"
22 #include "lapic.h"
23 #include "mmu.h"
24 #include "trace.h"
25 #include "pmu.h"
26
27 static u32 xstate_required_size(u64 xstate_bv, bool compacted)
28 {
29         int feature_bit = 0;
30         u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
31
32         xstate_bv &= XFEATURE_MASK_EXTEND;
33         while (xstate_bv) {
34                 if (xstate_bv & 0x1) {
35                         u32 eax, ebx, ecx, edx, offset;
36                         cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
37                         offset = compacted ? ret : ebx;
38                         ret = max(ret, offset + eax);
39                 }
40
41                 xstate_bv >>= 1;
42                 feature_bit++;
43         }
44
45         return ret;
46 }
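/*
 * Editorial note, not part of the original file: CPUID.(EAX=0DH, ECX=n)
 * reports, for each extended state component n, the size of its save area
 * in EAX and (for the standard, non-compacted XSAVE format) its offset from
 * the start of the XSAVE area in EBX.  xstate_required_size() therefore
 * returns the end (offset + size) of the highest component set in xstate_bv,
 * using a running offset instead of EBX when 'compacted' is true.  As a
 * rough example on typical hardware, AVX state (component 2) is 256 bytes at
 * offset 576, so an xstate_bv containing only that bit yields a
 * standard-format size of 576 + 256 = 832 bytes.
 */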
47
48 bool kvm_mpx_supported(void)
49 {
50         return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
51                  && kvm_x86_ops->mpx_supported());
52 }
53 EXPORT_SYMBOL_GPL(kvm_mpx_supported);
54
55 u64 kvm_supported_xcr0(void)
56 {
57         u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
58
59         if (!kvm_mpx_supported())
60                 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
61
62         return xcr0;
63 }
64
65 #define F(x) bit(X86_FEATURE_##x)
66
67 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
68 {
69         struct kvm_cpuid_entry2 *best;
70         struct kvm_lapic *apic = vcpu->arch.apic;
71
72         best = kvm_find_cpuid_entry(vcpu, 1, 0);
73         if (!best)
74                 return 0;
75
76         /* Update OSXSAVE bit */
77         if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
78                 best->ecx &= ~F(OSXSAVE);
79                 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
80                         best->ecx |= F(OSXSAVE);
81         }
82
83         best->edx &= ~F(APIC);
84         if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
85                 best->edx |= F(APIC);
86
87         if (apic) {
88                 if (best->ecx & F(TSC_DEADLINE_TIMER))
89                         apic->lapic_timer.timer_mode_mask = 3 << 17;
90                 else
91                         apic->lapic_timer.timer_mode_mask = 1 << 17;
92         }
93
94         best = kvm_find_cpuid_entry(vcpu, 7, 0);
95         if (best) {
96                 /* Update OSPKE bit */
97                 if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
98                         best->ecx &= ~F(OSPKE);
99                         if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
100                                 best->ecx |= F(OSPKE);
101                 }
102         }
103
104         best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
105         if (!best) {
106                 vcpu->arch.guest_supported_xcr0 = 0;
107                 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
108         } else {
109                 vcpu->arch.guest_supported_xcr0 =
110                         (best->eax | ((u64)best->edx << 32)) &
111                         kvm_supported_xcr0();
112                 vcpu->arch.guest_xstate_size = best->ebx =
113                         xstate_required_size(vcpu->arch.xcr0, false);
114         }
115
116         best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
117         if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
118                 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
119
120         /*
121          * The existing code assumes the virtual address width is 48 or 57 bits in
122          * the canonical address checks; bail out if it is ever anything else.
123          */
124         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
125         if (best) {
126                 int vaddr_bits = (best->eax & 0xff00) >> 8;
127
128                 if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
129                         return -EINVAL;
130         }
131
132         best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
133         if (kvm_hlt_in_guest(vcpu->kvm) && best &&
134                 (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
135                 best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
136
137         if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
138                 best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
139                 if (best) {
140                         if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
141                                 best->ecx |= F(MWAIT);
142                         else
143                                 best->ecx &= ~F(MWAIT);
144                 }
145         }
146
147         /* Update physical-address width */
148         vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
149         kvm_mmu_reset_context(vcpu);
150
151         kvm_pmu_refresh(vcpu);
152         return 0;
153 }
154
155 static int is_efer_nx(void)
156 {
157         unsigned long long efer = 0;
158
159         rdmsrl_safe(MSR_EFER, &efer);
160         return efer & EFER_NX;
161 }
162
163 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
164 {
165         int i;
166         struct kvm_cpuid_entry2 *e, *entry;
167
168         entry = NULL;
169         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
170                 e = &vcpu->arch.cpuid_entries[i];
171                 if (e->function == 0x80000001) {
172                         entry = e;
173                         break;
174                 }
175         }
176         if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
177                 entry->edx &= ~F(NX);
178                 printk(KERN_INFO "kvm: guest NX capability removed\n");
179         }
180 }
181
182 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
183 {
184         struct kvm_cpuid_entry2 *best;
185
186         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
187         if (!best || best->eax < 0x80000008)
188                 goto not_found;
189         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
190         if (best)
191                 return best->eax & 0xff;
192 not_found:
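        /*
         * Editorial note: 36 bits is the architectural fallback here; per the
         * SDM, a PAE-capable CPU that does not enumerate leaf 0x80000008
         * supports at most 36 physical address bits.
         */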
193         return 36;
194 }
195 EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);
196
197 /* legacy KVM_SET_CPUID: old userspace passes struct kvm_cpuid_entry; convert it to kvm_cpuid_entry2 */
198 int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
199                              struct kvm_cpuid *cpuid,
200                              struct kvm_cpuid_entry __user *entries)
201 {
202         int r, i;
203         struct kvm_cpuid_entry *cpuid_entries = NULL;
204
205         r = -E2BIG;
206         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
207                 goto out;
208         r = -ENOMEM;
209         if (cpuid->nent) {
210                 cpuid_entries =
211                         vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
212                                            cpuid->nent));
213                 if (!cpuid_entries)
214                         goto out;
215                 r = -EFAULT;
216                 if (copy_from_user(cpuid_entries, entries,
217                                    cpuid->nent * sizeof(struct kvm_cpuid_entry)))
218                         goto out;
219         }
220         for (i = 0; i < cpuid->nent; i++) {
221                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
222                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
223                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
224                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
225                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
226                 vcpu->arch.cpuid_entries[i].index = 0;
227                 vcpu->arch.cpuid_entries[i].flags = 0;
228                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
229                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
230                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
231         }
232         vcpu->arch.cpuid_nent = cpuid->nent;
233         cpuid_fix_nx_cap(vcpu);
234         kvm_apic_set_version(vcpu);
235         kvm_x86_ops->cpuid_update(vcpu);
236         r = kvm_update_cpuid(vcpu);
237
238 out:
239         vfree(cpuid_entries);
240         return r;
241 }
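/*
 * Editorial note: a minimal userspace sketch (not part of this file) of how
 * these set-CPUID handlers are typically reached.  The ioctls are real; the
 * kvm_fd/vcpu_fd descriptors and the entry count are illustrative and assume
 * the VM and vCPU have already been created.
 *
 *	struct kvm_cpuid2 *cpuid;
 *	int nent = 100;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = nent;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) < 0)
 *		err(1, "KVM_GET_SUPPORTED_CPUID");
 *	if (ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid) < 0)
 *		err(1, "KVM_SET_CPUID2");
 */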
242
243 int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
244                               struct kvm_cpuid2 *cpuid,
245                               struct kvm_cpuid_entry2 __user *entries)
246 {
247         int r;
248
249         r = -E2BIG;
250         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
251                 goto out;
252         r = -EFAULT;
253         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
254                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
255                 goto out;
256         vcpu->arch.cpuid_nent = cpuid->nent;
257         kvm_apic_set_version(vcpu);
258         kvm_x86_ops->cpuid_update(vcpu);
259         r = kvm_update_cpuid(vcpu);
260 out:
261         return r;
262 }
263
264 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
265                               struct kvm_cpuid2 *cpuid,
266                               struct kvm_cpuid_entry2 __user *entries)
267 {
268         int r;
269
270         r = -E2BIG;
271         if (cpuid->nent < vcpu->arch.cpuid_nent)
272                 goto out;
273         r = -EFAULT;
274         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
275                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
276                 goto out;
277         return 0;
278
279 out:
280         cpuid->nent = vcpu->arch.cpuid_nent;
281         return r;
282 }
283
284 static void cpuid_mask(u32 *word, int wordnum)
285 {
286         *word &= boot_cpu_data.x86_capability[wordnum];
287 }
288
289 static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
290                            u32 index)
291 {
292         entry->function = function;
293         entry->index = index;
294         entry->flags = 0;
295
296         cpuid_count(entry->function, entry->index,
297                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
298
299         switch (function) {
300         case 2:
301                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
302                 break;
303         case 4:
304         case 7:
305         case 0xb:
306         case 0xd:
307         case 0x14:
308         case 0x8000001d:
309                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
310                 break;
311         }
312 }
313
314 static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
315                                     u32 func, int *nent, int maxnent)
316 {
317         entry->function = func;
318         entry->index = 0;
319         entry->flags = 0;
320
321         switch (func) {
322         case 0:
323                 entry->eax = 7;
324                 ++*nent;
325                 break;
326         case 1:
327                 entry->ecx = F(MOVBE);
328                 ++*nent;
329                 break;
330         case 7:
331                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
332                 entry->eax = 0;
333                 entry->ecx = F(RDPID);
334                 ++*nent;
335         default:
336                 break;
337         }
338
339         return 0;
340 }
341
342 static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
343 {
344         unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
345         unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
346         unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
347         unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
348         unsigned f_la57;
349
350         /* cpuid 7.0.ebx */
351         const u32 kvm_cpuid_7_0_ebx_x86_features =
352                 F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
353                 F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
354                 F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
355                 F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
356                 F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;
357
358         /* cpuid 7.0.ecx */
359         const u32 kvm_cpuid_7_0_ecx_x86_features =
360                 F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
361                 F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
362                 F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
363                 F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);
364
365         /* cpuid 7.0.edx */
366         const u32 kvm_cpuid_7_0_edx_x86_features =
367                 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
368                 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
369                 F(MD_CLEAR);
370
371         /* cpuid 7.1.eax */
372         const u32 kvm_cpuid_7_1_eax_x86_features =
373                 F(AVX512_BF16);
374
375         switch (index) {
376         case 0:
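                /*
                 * Editorial note: CPUID.(EAX=07H, ECX=0):EAX enumerates the
                 * highest supported sub-leaf; clamp it to 1, the AVX512_BF16
                 * sub-leaf handled in case 1 below.
                 */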
377                 entry->eax = min(entry->eax, 1u);
378                 entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
379                 cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
380                 /* TSC_ADJUST is emulated */
381                 entry->ebx |= F(TSC_ADJUST);
382
383                 entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
384                 f_la57 = entry->ecx & F(LA57);
385                 cpuid_mask(&entry->ecx, CPUID_7_ECX);
386                 /* Set LA57 based on hardware capability. */
387                 entry->ecx |= f_la57;
388                 entry->ecx |= f_umip;
389                 /* PKU is not yet implemented for shadow paging. */
390                 if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
391                         entry->ecx &= ~F(PKU);
392
393                 entry->edx &= kvm_cpuid_7_0_edx_x86_features;
394                 cpuid_mask(&entry->edx, CPUID_7_EDX);
395                 /*
396                  * We emulate ARCH_CAPABILITIES in software even
397                  * if the host doesn't support it.
398                  */
399                 entry->edx |= F(ARCH_CAPABILITIES);
400                 break;
401         case 1:
402                 entry->eax &= kvm_cpuid_7_1_eax_x86_features;
403                 entry->ebx = 0;
404                 entry->ecx = 0;
405                 entry->edx = 0;
406                 break;
407         default:
408                 WARN_ON_ONCE(1);
409                 entry->eax = 0;
410                 entry->ebx = 0;
411                 entry->ecx = 0;
412                 entry->edx = 0;
413                 break;
414         }
415 }
416
417 static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
418                                   int *nent, int maxnent)
419 {
420         int r;
421         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
422 #ifdef CONFIG_X86_64
423         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
424                                 ? F(GBPAGES) : 0;
425         unsigned f_lm = F(LM);
426 #else
427         unsigned f_gbpages = 0;
428         unsigned f_lm = 0;
429 #endif
430         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
431         unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
432         unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
433
434         /* cpuid 1.edx */
435         const u32 kvm_cpuid_1_edx_x86_features =
436                 F(FPU) | F(VME) | F(DE) | F(PSE) |
437                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
438                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
439                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
440                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
441                 0 /* Reserved, DS, ACPI */ | F(MMX) |
442                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
443                 0 /* HTT, TM, Reserved, PBE */;
444         /* cpuid 0x80000001.edx */
445         const u32 kvm_cpuid_8000_0001_edx_x86_features =
446                 F(FPU) | F(VME) | F(DE) | F(PSE) |
447                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
448                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
449                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
450                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
451                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
452                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
453                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
454         /* cpuid 1.ecx */
455         const u32 kvm_cpuid_1_ecx_x86_features =
456                 /* NOTE: MONITOR (and MWAIT) are emulated as NOP,
457          * but *not* advertised to guests via CPUID! */
458                 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
459                 0 /* DS-CPL, VMX, SMX, EST */ |
460                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
461                 F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
462                 F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
463                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
464                 0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
465                 F(F16C) | F(RDRAND);
466         /* cpuid 0x80000001.ecx */
467         const u32 kvm_cpuid_8000_0001_ecx_x86_features =
468                 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
469                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
470                 F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
471                 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
472                 F(TOPOEXT) | F(PERFCTR_CORE);
473
474         /* cpuid 0x80000008.ebx */
475         const u32 kvm_cpuid_8000_0008_ebx_x86_features =
476                 F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
477                 F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
478
479         /* cpuid 0xC0000001.edx */
480         const u32 kvm_cpuid_C000_0001_edx_x86_features =
481                 F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
482                 F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
483                 F(PMM) | F(PMM_EN);
484
485         /* cpuid 0xD.1.eax */
486         const u32 kvm_cpuid_D_1_eax_x86_features =
487                 F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
488
489         /* all calls to cpuid_count() should be made on the same cpu */
490         get_cpu();
491
492         r = -E2BIG;
493
494         if (*nent >= maxnent)
495                 goto out;
496
497         do_host_cpuid(entry, function, 0);
498         ++*nent;
499
500         switch (function) {
501         case 0:
502                 /* Limited to the highest leaf implemented in KVM. */
503                 entry->eax = min(entry->eax, 0x1fU);
504                 break;
505         case 1:
506                 entry->edx &= kvm_cpuid_1_edx_x86_features;
507                 cpuid_mask(&entry->edx, CPUID_1_EDX);
508                 entry->ecx &= kvm_cpuid_1_ecx_x86_features;
509                 cpuid_mask(&entry->ecx, CPUID_1_ECX);
510                 /* we support x2apic emulation even if host does not support
511                  * it since we emulate x2apic in software */
512                 entry->ecx |= F(X2APIC);
513                 break;
514         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
515          * may return different values. This forces us to get_cpu() before
516          * issuing the first command, and also to emulate this annoying behavior
517          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
518         case 2: {
519                 int t, times = entry->eax & 0xff;
520
521                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
522                 for (t = 1; t < times; ++t) {
523                         if (*nent >= maxnent)
524                                 goto out;
525
526                         do_host_cpuid(&entry[t], function, 0);
527                         ++*nent;
528                 }
529                 break;
530         }
531         /* functions 4 and 0x8000001d have an additional index. */
532         case 4:
533         case 0x8000001d: {
534                 int i, cache_type;
535
536                 /* read more entries until cache_type is zero */
537                 for (i = 1; ; ++i) {
538                         if (*nent >= maxnent)
539                                 goto out;
540
541                         cache_type = entry[i - 1].eax & 0x1f;
542                         if (!cache_type)
543                                 break;
544                         do_host_cpuid(&entry[i], function, i);
545                         ++*nent;
546                 }
547                 break;
548         }
549         case 6: /* Thermal management */
550                 entry->eax = 0x4; /* allow ARAT */
551                 entry->ebx = 0;
552                 entry->ecx = 0;
553                 entry->edx = 0;
554                 break;
555         /* function 7 has an additional index. */
556         case 7: {
557                 int i;
558
559                 for (i = 0; ; ) {
560                         do_cpuid_7_mask(&entry[i], i);
561                         if (i == entry->eax)
562                                 break;
563                         if (*nent >= maxnent)
564                                 goto out;
565
566                         ++i;
567                         do_host_cpuid(&entry[i], function, i);
568                         ++*nent;
569                 }
570                 break;
571         }
572         case 9:
573                 break;
574         case 0xa: { /* Architectural Performance Monitoring */
575                 struct x86_pmu_capability cap;
576                 union cpuid10_eax eax;
577                 union cpuid10_edx edx;
578
579                 perf_get_x86_pmu_capability(&cap);
580
581                 /*
582                  * Only support guest architectural pmu on a host
583                  * with architectural pmu.
584                  */
585                 if (!cap.version)
586                         memset(&cap, 0, sizeof(cap));
587
588                 eax.split.version_id = min(cap.version, 2);
589                 eax.split.num_counters = cap.num_counters_gp;
590                 eax.split.bit_width = cap.bit_width_gp;
591                 eax.split.mask_length = cap.events_mask_len;
592
593                 edx.split.num_counters_fixed = cap.num_counters_fixed;
594                 edx.split.bit_width_fixed = cap.bit_width_fixed;
595                 edx.split.reserved = 0;
596
597                 entry->eax = eax.full;
598                 entry->ebx = cap.events_mask;
599                 entry->ecx = 0;
600                 entry->edx = edx.full;
601                 break;
602         }
603         /*
604          * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb,
605          * so the two can be handled by common code.
606          */
607         case 0x1f:
608         case 0xb: {
609                 int i, level_type;
610
611                 /* read more entries until level_type is zero */
612                 for (i = 1; ; ++i) {
613                         if (*nent >= maxnent)
614                                 goto out;
615
616                         level_type = entry[i - 1].ecx & 0xff00;
617                         if (!level_type)
618                                 break;
619                         do_host_cpuid(&entry[i], function, i);
620                         ++*nent;
621                 }
622                 break;
623         }
624         case 0xd: {
625                 int idx, i;
626                 u64 supported = kvm_supported_xcr0();
627
628                 entry->eax &= supported;
629                 entry->ebx = xstate_required_size(supported, false);
630                 entry->ecx = entry->ebx;
631                 entry->edx &= supported >> 32;
632                 if (!supported)
633                         break;
634
635                 for (idx = 1, i = 1; idx < 64; ++idx) {
636                         u64 mask = ((u64)1 << idx);
637                         if (*nent >= maxnent)
638                                 goto out;
639
640                         do_host_cpuid(&entry[i], function, idx);
641                         if (idx == 1) {
642                                 entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
643                                 cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
644                                 entry[i].ebx = 0;
645                                 if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
646                                         entry[i].ebx =
647                                                 xstate_required_size(supported,
648                                                                      true);
649                         } else {
650                                 if (entry[i].eax == 0 || !(supported & mask))
651                                         continue;
652                                 if (WARN_ON_ONCE(entry[i].ecx & 1))
653                                         continue;
654                         }
655                         entry[i].ecx = 0;
656                         entry[i].edx = 0;
657                         ++*nent;
658                         ++i;
659                 }
660                 break;
661         }
662         /* Intel PT */
663         case 0x14: {
664                 int t, times = entry->eax;
665
666                 if (!f_intel_pt)
667                         break;
668
669                 for (t = 1; t <= times; ++t) {
670                         if (*nent >= maxnent)
671                                 goto out;
672                         do_host_cpuid(&entry[t], function, t);
673                         ++*nent;
674                 }
675                 break;
676         }
677         case KVM_CPUID_SIGNATURE: {
678                 static const char signature[12] = "KVMKVMKVM\0\0";
679                 const u32 *sigptr = (const u32 *)signature;
680                 entry->eax = KVM_CPUID_FEATURES;
681                 entry->ebx = sigptr[0];
682                 entry->ecx = sigptr[1];
683                 entry->edx = sigptr[2];
684                 break;
685         }
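        /*
         * Editorial note: a guest probes this hypervisor leaf roughly as in
         * the sketch below (illustrative, not part of this file): CPUID with
         * EAX = 0x40000000 returns the "KVMKVMKVM\0\0\0" signature in
         * EBX/ECX/EDX and the highest KVM leaf in EAX, here KVM_CPUID_FEATURES
         * (0x40000001), which the next case fills in.
         *
         *	unsigned int sig[3], eax = 0x40000000;
         *	__asm__("cpuid" : "+a"(eax), "=b"(sig[0]), "=c"(sig[1]), "=d"(sig[2]));
         *	if (!memcmp(sig, "KVMKVMKVM\0\0\0", 12))
         *		puts("running under KVM");
         */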
686         case KVM_CPUID_FEATURES:
687                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
688                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
689                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
690                              (1 << KVM_FEATURE_ASYNC_PF) |
691                              (1 << KVM_FEATURE_PV_EOI) |
692                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
693                              (1 << KVM_FEATURE_PV_UNHALT) |
694                              (1 << KVM_FEATURE_PV_TLB_FLUSH) |
695                              (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
696                              (1 << KVM_FEATURE_PV_SEND_IPI) |
697                              (1 << KVM_FEATURE_POLL_CONTROL) |
698                              (1 << KVM_FEATURE_PV_SCHED_YIELD);
699
700                 if (sched_info_on())
701                         entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
702
703                 entry->ebx = 0;
704                 entry->ecx = 0;
705                 entry->edx = 0;
706                 break;
707         case 0x80000000:
708                 entry->eax = min(entry->eax, 0x8000001f);
709                 break;
710         case 0x80000001:
711                 entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
712                 cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
713                 entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
714                 cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
715                 break;
716         case 0x80000007: /* Advanced power management */
717                 /* invariant TSC is CPUID.80000007H:EDX[8] */
718                 entry->edx &= (1 << 8);
719                 /* mask against host */
720                 entry->edx &= boot_cpu_data.x86_power;
721                 entry->eax = entry->ebx = entry->ecx = 0;
722                 break;
723         case 0x80000008: {
724                 unsigned g_phys_as = (entry->eax >> 16) & 0xff;
725                 unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
726                 unsigned phys_as = entry->eax & 0xff;
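                /*
                 * Editorial note: leaf 0x80000008 EAX packs the address
                 * widths: bits 7:0 are the physical address bits, bits 15:8
                 * the linear (virtual) address bits, and bits 23:16 the guest
                 * physical address bits, which may be 0, in which case the
                 * host physical width is reused below.
                 */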
727
728                 if (!g_phys_as)
729                         g_phys_as = phys_as;
730                 entry->eax = g_phys_as | (virt_as << 8);
731                 entry->edx = 0;
732                 /*
733                  * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
734                  * hardware cpuid
735                  */
736                 if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
737                         entry->ebx |= F(AMD_IBPB);
738                 if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
739                         entry->ebx |= F(AMD_IBRS);
740                 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
741                         entry->ebx |= F(VIRT_SSBD);
742                 entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
743                 cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
744                 /*
745                  * The preference is to use SPEC CTRL MSR instead of the
746                  * VIRT_SPEC MSR.
747                  */
748                 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
749                     !boot_cpu_has(X86_FEATURE_AMD_SSBD))
750                         entry->ebx |= F(VIRT_SSBD);
751                 break;
752         }
753         case 0x80000019:
754                 entry->ecx = entry->edx = 0;
755                 break;
756         case 0x8000001a:
757         case 0x8000001e:
758                 break;
759         /* Add support for Centaur's CPUID instruction */
760         case 0xC0000000:
761                 /* Just support up to 0xC0000004 now */
762                 entry->eax = min(entry->eax, 0xC0000004);
763                 break;
764         case 0xC0000001:
765                 entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
766                 cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
767                 break;
768         case 3: /* Processor serial number */
769         case 5: /* MONITOR/MWAIT */
770         case 0xC0000002:
771         case 0xC0000003:
772         case 0xC0000004:
773         default:
774                 entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
775                 break;
776         }
777
778         kvm_x86_ops->set_supported_cpuid(function, entry);
779
780         r = 0;
781
782 out:
783         put_cpu();
784
785         return r;
786 }
787
788 static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
789                          int *nent, int maxnent, unsigned int type)
790 {
791         if (type == KVM_GET_EMULATED_CPUID)
792                 return __do_cpuid_func_emulated(entry, func, nent, maxnent);
793
794         return __do_cpuid_func(entry, func, nent, maxnent);
795 }
796
797 #undef F
798
799 struct kvm_cpuid_param {
800         u32 func;
801         bool (*qualifier)(const struct kvm_cpuid_param *param);
802 };
803
804 static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
805 {
806         return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
807 }
808
809 static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
810                                  __u32 num_entries, unsigned int ioctl_type)
811 {
812         int i;
813         __u32 pad[3];
814
815         if (ioctl_type != KVM_GET_EMULATED_CPUID)
816                 return false;
817
818         /*
819          * We want to make sure that ->padding is being passed clean from
820          * userspace in case we want to use it for something in the future.
821          *
822          * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we
823          * have to settle for enforcing it on the emulated side only. /me
824          * sheds a tear.
825          */
826         for (i = 0; i < num_entries; i++) {
827                 if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
828                         return true;
829
830                 if (pad[0] || pad[1] || pad[2])
831                         return true;
832         }
833         return false;
834 }
835
836 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
837                             struct kvm_cpuid_entry2 __user *entries,
838                             unsigned int type)
839 {
840         struct kvm_cpuid_entry2 *cpuid_entries;
841         int limit, nent = 0, r = -E2BIG, i;
842         u32 func;
843         static const struct kvm_cpuid_param param[] = {
844                 { .func = 0 },
845                 { .func = 0x80000000 },
846                 { .func = 0xC0000000, .qualifier = is_centaur_cpu },
847                 { .func = KVM_CPUID_SIGNATURE },
848         };
849
850         if (cpuid->nent < 1)
851                 goto out;
852         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
853                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
854
855         if (sanity_check_entries(entries, cpuid->nent, type))
856                 return -EINVAL;
857
858         r = -ENOMEM;
859         cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
860                                            cpuid->nent));
861         if (!cpuid_entries)
862                 goto out;
863
864         r = 0;
865         for (i = 0; i < ARRAY_SIZE(param); i++) {
866                 const struct kvm_cpuid_param *ent = &param[i];
867
868                 if (ent->qualifier && !ent->qualifier(ent))
869                         continue;
870
871                 r = do_cpuid_func(&cpuid_entries[nent], ent->func,
872                                   &nent, cpuid->nent, type);
873
874                 if (r)
875                         goto out_free;
876
877                 limit = cpuid_entries[nent - 1].eax;
878                 for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
879                         r = do_cpuid_func(&cpuid_entries[nent], func,
880                                           &nent, cpuid->nent, type);
881
882                 if (r)
883                         goto out_free;
884         }
885
886         r = -EFAULT;
887         if (copy_to_user(entries, cpuid_entries,
888                          nent * sizeof(struct kvm_cpuid_entry2)))
889                 goto out_free;
890         cpuid->nent = nent;
891         r = 0;
892
893 out_free:
894         vfree(cpuid_entries);
895 out:
896         return r;
897 }
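/*
 * Editorial note: this one helper backs two ioctls.  With type ==
 * KVM_GET_SUPPORTED_CPUID it walks the hardware leaves via __do_cpuid_func()
 * and masks them down to what KVM can virtualize; with type ==
 * KVM_GET_EMULATED_CPUID it reports only the few features KVM emulates purely
 * in software (e.g. MOVBE, RDPID) via __do_cpuid_func_emulated().
 */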
898
899 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
900 {
901         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
902         struct kvm_cpuid_entry2 *ej;
903         int j = i;
904         int nent = vcpu->arch.cpuid_nent;
905
906         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
907         /* when no next entry is found, the current entry[i] is reselected */
908         do {
909                 j = (j + 1) % nent;
910                 ej = &vcpu->arch.cpuid_entries[j];
911         } while (ej->function != e->function);
912
913         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
914
915         return j;
916 }
917
918 /* find an entry with matching function, matching index (if needed), and that
919  * should be read next (if it's stateful) */
920 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
921         u32 function, u32 index)
922 {
923         if (e->function != function)
924                 return 0;
925         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
926                 return 0;
927         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
928             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
929                 return 0;
930         return 1;
931 }
932
933 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
934                                               u32 function, u32 index)
935 {
936         int i;
937         struct kvm_cpuid_entry2 *best = NULL;
938
939         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
940                 struct kvm_cpuid_entry2 *e;
941
942                 e = &vcpu->arch.cpuid_entries[i];
943                 if (is_matching_cpuid_entry(e, function, index)) {
944                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
945                                 move_to_next_stateful_cpuid_entry(vcpu, i);
946                         best = e;
947                         break;
948                 }
949         }
950         return best;
951 }
952 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
953
954 /*
955  * If no match is found, check whether we exceed the vCPU's limit
956  * and return the content of the highest valid _standard_ leaf instead.
957  * This is to satisfy the CPUID specification.
958  */
959 static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
960                                                   u32 function, u32 index)
961 {
962         struct kvm_cpuid_entry2 *maxlevel;
963
964         maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
965         if (!maxlevel || maxlevel->eax >= function)
966                 return NULL;
967         if (function & 0x80000000) {
968                 maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
969                 if (!maxlevel)
970                         return NULL;
971         }
972         return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
973 }
974
975 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
976                u32 *ecx, u32 *edx, bool check_limit)
977 {
978         u32 function = *eax, index = *ecx;
979         struct kvm_cpuid_entry2 *best;
980         bool entry_found = true;
981
982         best = kvm_find_cpuid_entry(vcpu, function, index);
983
984         if (!best) {
985                 entry_found = false;
986                 if (!check_limit)
987                         goto out;
988
989                 best = check_cpuid_limit(vcpu, function, index);
990         }
991
992 out:
993         if (best) {
994                 *eax = best->eax;
995                 *ebx = best->ebx;
996                 *ecx = best->ecx;
997                 *edx = best->edx;
998         } else
999                 *eax = *ebx = *ecx = *edx = 0;
1000         trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
1001         return entry_found;
1002 }
1003 EXPORT_SYMBOL_GPL(kvm_cpuid);
1004
1005 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1006 {
1007         u32 eax, ebx, ecx, edx;
1008
1009         if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
1010                 return 1;
1011
1012         eax = kvm_rax_read(vcpu);
1013         ecx = kvm_rcx_read(vcpu);
1014         kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
1015         kvm_rax_write(vcpu, eax);
1016         kvm_rbx_write(vcpu, ebx);
1017         kvm_rcx_write(vcpu, ecx);
1018         kvm_rdx_write(vcpu, edx);
1019         return kvm_skip_emulated_instruction(vcpu);
1020 }
1021 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);