2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright IBM Corp. 2007
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
21 #include <linux/errno.h>
22 #include <linux/err.h>
23 #include <linux/kvm_host.h>
24 #include <linux/vmalloc.h>
25 #include <linux/hrtimer.h>
26 #include <linux/sched/signal.h>
28 #include <linux/slab.h>
29 #include <linux/file.h>
30 #include <linux/module.h>
31 #include <linux/irqbypass.h>
32 #include <linux/kvm_irqfd.h>
33 #include <asm/cputable.h>
34 #include <linux/uaccess.h>
35 #include <asm/kvm_ppc.h>
36 #include <asm/tlbflush.h>
37 #include <asm/cputhreads.h>
38 #include <asm/irqflags.h>
39 #include <asm/iommu.h>
40 #include <asm/switch_to.h>
42 #ifdef CONFIG_PPC_PSERIES
43 #include <asm/hvcall.h>
44 #include <asm/plpar_wrappers.h>
49 #include "../mm/mmu_decl.h"
51 #define CREATE_TRACE_POINTS
54 struct kvmppc_ops *kvmppc_hv_ops;
55 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
56 struct kvmppc_ops *kvmppc_pr_ops;
57 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
60 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
62 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
65 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
70 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
* Common checks before entering the guest world.  Call with interrupts
* enabled; they are hard-disabled before we actually enter the guest.
*
* returns:
* == 1 if we're ready to go into guest state
* <= 0 if we need to go back to the host with return value
84 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
88 WARN_ON(irqs_disabled());
99 if (signal_pending(current)) {
100 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
101 vcpu->run->exit_reason = KVM_EXIT_INTR;
106 vcpu->mode = IN_GUEST_MODE;
109 * Reading vcpu->requests must happen after setting vcpu->mode,
110 * so we don't miss a request because the requester sees
111 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
112 * before next entering the guest (and thus doesn't IPI).
113 * This also orders the write to mode from any reads
114 * to the page tables done while the VCPU is running.
115 * Please see the comment in kvm_flush_remote_tlbs.
119 if (kvm_request_pending(vcpu)) {
/* Make sure requests are processed with preemption enabled */
122 trace_kvm_check_requests(vcpu);
123 r = kvmppc_core_check_requests(vcpu);
130 if (kvmppc_core_prepare_to_enter(vcpu)) {
/* interrupts got enabled in between, so we're back at square one */
136 guest_enter_irqoff();
144 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
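/*
 * Illustrative caller sketch (not taken verbatim from any particular
 * subarch entry path): callers bail out to the host on <= 0 and only
 * enter the guest once 1 is returned, e.g.
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	(back out to the host with r)
 *	(r == 1: interrupts are hard-disabled, safe to enter the guest)
 */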
146 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
147 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
149 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
152 shared->sprg0 = swab64(shared->sprg0);
153 shared->sprg1 = swab64(shared->sprg1);
154 shared->sprg2 = swab64(shared->sprg2);
155 shared->sprg3 = swab64(shared->sprg3);
156 shared->srr0 = swab64(shared->srr0);
157 shared->srr1 = swab64(shared->srr1);
158 shared->dar = swab64(shared->dar);
159 shared->msr = swab64(shared->msr);
160 shared->dsisr = swab32(shared->dsisr);
161 shared->int_pending = swab32(shared->int_pending);
162 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
163 shared->sr[i] = swab32(shared->sr[i]);
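/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in r11, up to four parameters in r3-r6, and the result goes back
 * in r3 with a second return value in r4.
 */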
167 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
169 int nr = kvmppc_get_gpr(vcpu, 11);
171 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
172 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
173 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
174 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
175 unsigned long r2 = 0;
177 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
179 param1 &= 0xffffffff;
180 param2 &= 0xffffffff;
181 param3 &= 0xffffffff;
182 param4 &= 0xffffffff;
186 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
188 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
189 /* Book3S can be little endian, find it out here */
190 int shared_big_endian = true;
191 if (vcpu->arch.intr_msr & MSR_LE)
192 shared_big_endian = false;
193 if (shared_big_endian != vcpu->arch.shared_big_endian)
194 kvmppc_swab_shared(vcpu);
195 vcpu->arch.shared_big_endian = shared_big_endian;
198 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
200 * Older versions of the Linux magic page code had
201 * a bug where they would map their trampoline code
202 * NX. If that's the case, remove !PR NX capability.
204 vcpu->arch.disable_kernel_nx = true;
205 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
208 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
209 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
211 #ifdef CONFIG_PPC_64K_PAGES
213 * Make sure our 4k magic page is in the same window of a 64k
214 * page within the guest and within the host's page.
216 if ((vcpu->arch.magic_page_pa & 0xf000) !=
217 ((ulong)vcpu->arch.shared & 0xf000)) {
218 void *old_shared = vcpu->arch.shared;
219 ulong shared = (ulong)vcpu->arch.shared;
223 shared |= vcpu->arch.magic_page_pa & 0xf000;
224 new_shared = (void*)shared;
225 memcpy(new_shared, old_shared, 0x1000);
226 vcpu->arch.shared = new_shared;
230 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
235 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
237 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
238 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
241 /* Second return value is in r4 */
243 case EV_HCALL_TOKEN(EV_IDLE):
245 kvm_vcpu_block(vcpu);
246 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
249 r = EV_UNIMPLEMENTED;
253 kvmppc_set_gpr(vcpu, 4, r2);
257 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
259 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
263 /* We have to know what CPU to virtualize */
267 /* PAPR only works with book3s_64 */
268 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
271 /* HV KVM can only do PAPR mode for now */
272 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
275 #ifdef CONFIG_KVM_BOOKE_HV
276 if (!cpu_has_feature(CPU_FTR_EMB_HV))
284 return r ? 0 : -EINVAL;
286 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
288 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
290 enum emulation_result er;
293 er = kvmppc_emulate_loadstore(vcpu);
296 /* Future optimization: only reload non-volatiles if they were
297 * actually modified. */
303 case EMULATE_DO_MMIO:
304 run->exit_reason = KVM_EXIT_MMIO;
305 /* We must reload nonvolatiles because "update" load/store
306 * instructions modify register state. */
307 /* Future optimization: only reload non-volatiles if they were
308 * actually modified. */
315 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
316 /* XXX Deliver Program interrupt to guest. */
317 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
328 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
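/*
 * kvmppc_st()/kvmppc_ld() copy data to/from a guest effective address:
 * the address is translated with kvmppc_xlate(), accesses hitting the
 * magic page are serviced directly from vcpu->arch.shared, and a failed
 * guest-memory access falls back to EMULATE_DO_MMIO.
 */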
330 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
333 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
334 struct kvmppc_pte pte;
339 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
349 /* Magic page override */
350 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
351 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
352 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
353 void *magic = vcpu->arch.shared;
354 magic += pte.eaddr & 0xfff;
355 memcpy(magic, ptr, size);
359 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
360 return EMULATE_DO_MMIO;
364 EXPORT_SYMBOL_GPL(kvmppc_st);
366 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
369 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
370 struct kvmppc_pte pte;
375 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
385 if (!data && !pte.may_execute)
388 /* Magic page override */
389 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
390 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
391 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
392 void *magic = vcpu->arch.shared;
393 magic += pte.eaddr & 0xfff;
394 memcpy(ptr, magic, size);
398 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
399 return EMULATE_DO_MMIO;
403 EXPORT_SYMBOL_GPL(kvmppc_ld);
405 int kvm_arch_hardware_enable(void)
410 int kvm_arch_hardware_setup(void)
415 void kvm_arch_check_processor_compat(void *rtn)
417 *(int *)rtn = kvmppc_core_check_processor_compat();
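/*
 * "type" below is the machine type that userspace passed to KVM_CREATE_VM:
 * KVM_VM_PPC_HV and KVM_VM_PPC_PR select an implementation explicitly,
 * while the default (0) takes whichever of the two is available.
 */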
420 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
422 struct kvmppc_ops *kvm_ops = NULL;
424 * if we have both HV and PR enabled, default is HV
428 kvm_ops = kvmppc_hv_ops;
430 kvm_ops = kvmppc_pr_ops;
433 } else if (type == KVM_VM_PPC_HV) {
436 kvm_ops = kvmppc_hv_ops;
437 } else if (type == KVM_VM_PPC_PR) {
440 kvm_ops = kvmppc_pr_ops;
444 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
447 kvm->arch.kvm_ops = kvm_ops;
448 return kvmppc_core_init_vm(kvm);
453 bool kvm_arch_has_vcpu_debugfs(void)
458 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
463 void kvm_arch_destroy_vm(struct kvm *kvm)
466 struct kvm_vcpu *vcpu;
468 #ifdef CONFIG_KVM_XICS
* We call kick_all_cpus_sync() to ensure that all
* CPUs have executed any pending IPIs before we
* continue and free the VCPU structures below.
474 if (is_kvmppc_hv_enabled(kvm))
475 kick_all_cpus_sync();
478 kvm_for_each_vcpu(i, vcpu, kvm)
479 kvm_arch_vcpu_free(vcpu);
481 mutex_lock(&kvm->lock);
482 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
483 kvm->vcpus[i] = NULL;
485 atomic_set(&kvm->online_vcpus, 0);
487 kvmppc_core_destroy_vm(kvm);
489 mutex_unlock(&kvm->lock);
491 /* drop the module reference */
492 module_put(kvm->arch.kvm_ops->owner);
495 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
498 /* Assume we're using HV mode when the HV module is loaded */
499 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
503 * Hooray - we know which VM type we're running on. Depend on
504 * that rather than the guess above.
506 hv_enabled = is_kvmppc_hv_enabled(kvm);
511 case KVM_CAP_PPC_BOOKE_SREGS:
512 case KVM_CAP_PPC_BOOKE_WATCHDOG:
513 case KVM_CAP_PPC_EPR:
515 case KVM_CAP_PPC_SEGSTATE:
516 case KVM_CAP_PPC_HIOR:
517 case KVM_CAP_PPC_PAPR:
519 case KVM_CAP_PPC_UNSET_IRQ:
520 case KVM_CAP_PPC_IRQ_LEVEL:
521 case KVM_CAP_ENABLE_CAP:
522 case KVM_CAP_ENABLE_CAP_VM:
523 case KVM_CAP_ONE_REG:
524 case KVM_CAP_IOEVENTFD:
525 case KVM_CAP_DEVICE_CTRL:
526 case KVM_CAP_IMMEDIATE_EXIT:
529 case KVM_CAP_PPC_PAIRED_SINGLES:
530 case KVM_CAP_PPC_OSI:
531 case KVM_CAP_PPC_GET_PVINFO:
532 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
535 /* We support this only for PR */
538 #ifdef CONFIG_KVM_MPIC
539 case KVM_CAP_IRQ_MPIC:
544 #ifdef CONFIG_PPC_BOOK3S_64
545 case KVM_CAP_SPAPR_TCE:
546 case KVM_CAP_SPAPR_TCE_64:
548 case KVM_CAP_SPAPR_TCE_VFIO:
549 case KVM_CAP_PPC_RTAS:
550 case KVM_CAP_PPC_FIXUP_HCALL:
551 case KVM_CAP_PPC_ENABLE_HCALL:
552 #ifdef CONFIG_KVM_XICS
553 case KVM_CAP_IRQ_XICS:
555 case KVM_CAP_PPC_GET_CPU_CHAR:
559 case KVM_CAP_PPC_ALLOC_HTAB:
562 #endif /* CONFIG_PPC_BOOK3S_64 */
563 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
564 case KVM_CAP_PPC_SMT:
567 if (kvm->arch.emul_smt_mode > 1)
568 r = kvm->arch.emul_smt_mode;
570 r = kvm->arch.smt_mode;
571 } else if (hv_enabled) {
572 if (cpu_has_feature(CPU_FTR_ARCH_300))
575 r = threads_per_subcore;
578 case KVM_CAP_PPC_SMT_POSSIBLE:
581 if (!cpu_has_feature(CPU_FTR_ARCH_300))
582 r = ((threads_per_subcore << 1) - 1);
584 /* P9 can emulate dbells, so allow any mode */
588 case KVM_CAP_PPC_RMA:
591 case KVM_CAP_PPC_HWRNG:
592 r = kvmppc_hwrng_present();
594 case KVM_CAP_PPC_MMU_RADIX:
595 r = !!(hv_enabled && radix_enabled());
597 case KVM_CAP_PPC_MMU_HASH_V3:
598 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
601 case KVM_CAP_SYNC_MMU:
602 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
604 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
610 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
611 case KVM_CAP_PPC_HTAB_FD:
615 case KVM_CAP_NR_VCPUS:
617 * Recommending a number of CPUs is somewhat arbitrary; we
618 * return the number of present CPUs for -HV (since a host
619 * will have secondary threads "offline"), and for other KVM
620 * implementations just count online CPUs.
623 r = num_present_cpus();
625 r = num_online_cpus();
627 case KVM_CAP_NR_MEMSLOTS:
628 r = KVM_USER_MEM_SLOTS;
630 case KVM_CAP_MAX_VCPUS:
633 #ifdef CONFIG_PPC_BOOK3S_64
634 case KVM_CAP_PPC_GET_SMMU_INFO:
637 case KVM_CAP_SPAPR_MULTITCE:
640 case KVM_CAP_SPAPR_RESIZE_HPT:
644 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
645 case KVM_CAP_PPC_FWNMI:
649 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
650 case KVM_CAP_PPC_HTM:
651 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
652 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
663 long kvm_arch_dev_ioctl(struct file *filp,
664 unsigned int ioctl, unsigned long arg)
669 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
670 struct kvm_memory_slot *dont)
672 kvmppc_core_free_memslot(kvm, free, dont);
675 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
676 unsigned long npages)
678 return kvmppc_core_create_memslot(kvm, slot, npages);
681 int kvm_arch_prepare_memory_region(struct kvm *kvm,
682 struct kvm_memory_slot *memslot,
683 const struct kvm_userspace_memory_region *mem,
684 enum kvm_mr_change change)
686 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
689 void kvm_arch_commit_memory_region(struct kvm *kvm,
690 const struct kvm_userspace_memory_region *mem,
691 const struct kvm_memory_slot *old,
692 const struct kvm_memory_slot *new,
693 enum kvm_mr_change change)
695 kvmppc_core_commit_memory_region(kvm, mem, old, new);
698 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
699 struct kvm_memory_slot *slot)
701 kvmppc_core_flush_memslot(kvm, slot);
704 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
706 struct kvm_vcpu *vcpu;
707 vcpu = kvmppc_core_vcpu_create(kvm, id);
709 vcpu->arch.wqp = &vcpu->wq;
710 kvmppc_create_vcpu_debugfs(vcpu, id);
715 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
719 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
721 /* Make sure we're not using the vcpu anymore */
722 hrtimer_cancel(&vcpu->arch.dec_timer);
724 kvmppc_remove_vcpu_debugfs(vcpu);
726 switch (vcpu->arch.irq_type) {
727 case KVMPPC_IRQ_MPIC:
728 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
730 case KVMPPC_IRQ_XICS:
732 kvmppc_xive_cleanup_vcpu(vcpu);
734 kvmppc_xics_free_icp(vcpu);
738 kvmppc_core_vcpu_free(vcpu);
741 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
743 kvm_arch_vcpu_free(vcpu);
746 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
748 return kvmppc_core_pending_dec(vcpu);
751 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
753 struct kvm_vcpu *vcpu;
755 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
756 kvmppc_decrementer_func(vcpu);
758 return HRTIMER_NORESTART;
761 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
765 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
766 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
767 vcpu->arch.dec_expires = get_tb();
769 #ifdef CONFIG_KVM_EXIT_TIMING
770 mutex_init(&vcpu->arch.exit_timing_lock);
772 ret = kvmppc_subarch_vcpu_init(vcpu);
776 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
778 kvmppc_mmu_destroy(vcpu);
779 kvmppc_subarch_vcpu_uninit(vcpu);
782 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
786 * vrsave (formerly usprg0) isn't used by Linux, but may
787 * be used by the guest.
789 * On non-booke this is associated with Altivec and
790 * is handled by code in book3s.c.
792 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
794 kvmppc_core_vcpu_load(vcpu, cpu);
797 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
799 kvmppc_core_vcpu_put(vcpu);
801 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
806 * irq_bypass_add_producer and irq_bypass_del_producer are only
807 * useful if the architecture supports PCI passthrough.
808 * irq_bypass_stop and irq_bypass_start are not needed and so
809 * kvm_ops are not defined for them.
811 bool kvm_arch_has_irq_bypass(void)
813 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
814 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
817 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
818 struct irq_bypass_producer *prod)
820 struct kvm_kernel_irqfd *irqfd =
821 container_of(cons, struct kvm_kernel_irqfd, consumer);
822 struct kvm *kvm = irqfd->kvm;
824 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
825 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
830 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
831 struct irq_bypass_producer *prod)
833 struct kvm_kernel_irqfd *irqfd =
834 container_of(cons, struct kvm_kernel_irqfd, consumer);
835 struct kvm *kvm = irqfd->kvm;
837 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
838 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
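/*
 * The VSX helpers below translate the element index of an MMIO access into
 * the doubleword/word position inside the target register, taking host
 * endianness into account; they return -1 for an out-of-range index.
 */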
842 static inline int kvmppc_get_vsr_dword_offset(int index)
846 if ((index != 0) && (index != 1))
858 static inline int kvmppc_get_vsr_word_offset(int index)
862 if ((index > 3) || (index < 0))
873 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
876 union kvmppc_one_reg val;
877 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
878 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
883 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
884 val.vval = VCPU_VSX_VR(vcpu, index);
885 val.vsxval[offset] = gpr;
886 VCPU_VSX_VR(vcpu, index) = val.vval;
888 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
892 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
895 union kvmppc_one_reg val;
896 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
898 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
899 val.vval = VCPU_VSX_VR(vcpu, index);
902 VCPU_VSX_VR(vcpu, index) = val.vval;
904 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
905 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
909 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
912 union kvmppc_one_reg val;
913 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
915 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
916 val.vsx32val[0] = gpr;
917 val.vsx32val[1] = gpr;
918 val.vsx32val[2] = gpr;
919 val.vsx32val[3] = gpr;
920 VCPU_VSX_VR(vcpu, index) = val.vval;
922 val.vsx32val[0] = gpr;
923 val.vsx32val[1] = gpr;
924 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
925 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
929 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
932 union kvmppc_one_reg val;
933 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
934 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
935 int dword_offset, word_offset;
940 if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
941 val.vval = VCPU_VSX_VR(vcpu, index);
942 val.vsx32val[offset] = gpr32;
943 VCPU_VSX_VR(vcpu, index) = val.vval;
945 dword_offset = offset / 2;
946 word_offset = offset % 2;
947 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
948 val.vsx32val[word_offset] = gpr32;
949 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
952 #endif /* CONFIG_VSX */
954 #ifdef CONFIG_ALTIVEC
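/*
 * The VMX helpers below do the same job for Altivec registers, but honour
 * the guest's endianness via kvmppc_need_byteswap() instead of a
 * compile-time check.
 */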
955 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
956 int index, int element_size)
959 int elts = sizeof(vector128)/element_size;
961 if ((index < 0) || (index >= elts))
964 if (kvmppc_need_byteswap(vcpu))
965 offset = elts - index - 1;
972 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
975 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
978 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
981 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
984 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
987 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
990 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
993 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
997 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1000 union kvmppc_one_reg val;
1001 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1002 vcpu->arch.mmio_vmx_offset);
1003 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1008 val.vval = VCPU_VSX_VR(vcpu, index);
1009 val.vsxval[offset] = gpr;
1010 VCPU_VSX_VR(vcpu, index) = val.vval;
1013 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1016 union kvmppc_one_reg val;
1017 int offset = kvmppc_get_vmx_word_offset(vcpu,
1018 vcpu->arch.mmio_vmx_offset);
1019 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1024 val.vval = VCPU_VSX_VR(vcpu, index);
1025 val.vsx32val[offset] = gpr32;
1026 VCPU_VSX_VR(vcpu, index) = val.vval;
1029 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1032 union kvmppc_one_reg val;
1033 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1034 vcpu->arch.mmio_vmx_offset);
1035 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1040 val.vval = VCPU_VSX_VR(vcpu, index);
1041 val.vsx16val[offset] = gpr16;
1042 VCPU_VSX_VR(vcpu, index) = val.vval;
1045 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1048 union kvmppc_one_reg val;
1049 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1050 vcpu->arch.mmio_vmx_offset);
1051 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1056 val.vval = VCPU_VSX_VR(vcpu, index);
1057 val.vsx8val[offset] = gpr8;
1058 VCPU_VSX_VR(vcpu, index) = val.vval;
1060 #endif /* CONFIG_ALTIVEC */
1062 #ifdef CONFIG_PPC_FPU
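/*
 * sp_to_dp()/dp_to_sp() bounce the value through an FPR so the FPU does
 * the single/double conversion; they back the 4-byte FP MMIO paths that
 * set mmio_sp64_extend.
 */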
1063 static inline u64 sp_to_dp(u32 fprs)
1069 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
1075 static inline u32 dp_to_sp(u64 fprd)
1081 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
1088 #define sp_to_dp(x) (x)
1089 #define dp_to_sp(x) (x)
1090 #endif /* CONFIG_PPC_FPU */
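/*
 * Complete a pending MMIO load: byteswap the data if required, optionally
 * sign- or FP-extend it, and route it into the destination GPR, FPR, QPR,
 * VSX or VMX register according to vcpu->arch.io_gpr and the recorded
 * copy type.
 */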
1092 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
1093 struct kvm_run *run)
1095 u64 uninitialized_var(gpr);
1097 if (run->mmio.len > sizeof(gpr)) {
1098 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1102 if (!vcpu->arch.mmio_host_swabbed) {
1103 switch (run->mmio.len) {
1104 case 8: gpr = *(u64 *)run->mmio.data; break;
1105 case 4: gpr = *(u32 *)run->mmio.data; break;
1106 case 2: gpr = *(u16 *)run->mmio.data; break;
1107 case 1: gpr = *(u8 *)run->mmio.data; break;
1110 switch (run->mmio.len) {
1111 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1112 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1113 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1114 case 1: gpr = *(u8 *)run->mmio.data; break;
1118 /* conversion between single and double precision */
1119 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1120 gpr = sp_to_dp(gpr);
1122 if (vcpu->arch.mmio_sign_extend) {
1123 switch (run->mmio.len) {
1126 gpr = (s64)(s32)gpr;
1130 gpr = (s64)(s16)gpr;
1138 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1139 case KVM_MMIO_REG_GPR:
1140 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1142 case KVM_MMIO_REG_FPR:
1143 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1144 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1146 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1148 #ifdef CONFIG_PPC_BOOK3S
1149 case KVM_MMIO_REG_QPR:
1150 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1152 case KVM_MMIO_REG_FQPR:
1153 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1154 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1158 case KVM_MMIO_REG_VSX:
1159 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1160 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1162 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1163 kvmppc_set_vsr_dword(vcpu, gpr);
1164 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1165 kvmppc_set_vsr_word(vcpu, gpr);
1166 else if (vcpu->arch.mmio_copy_type ==
1167 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1168 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1169 else if (vcpu->arch.mmio_copy_type ==
1170 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1171 kvmppc_set_vsr_word_dump(vcpu, gpr);
1174 #ifdef CONFIG_ALTIVEC
1175 case KVM_MMIO_REG_VMX:
1176 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1177 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1179 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1180 kvmppc_set_vmx_dword(vcpu, gpr);
1181 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1182 kvmppc_set_vmx_word(vcpu, gpr);
1183 else if (vcpu->arch.mmio_copy_type ==
1184 KVMPPC_VMX_COPY_HWORD)
1185 kvmppc_set_vmx_hword(vcpu, gpr);
1186 else if (vcpu->arch.mmio_copy_type ==
1187 KVMPPC_VMX_COPY_BYTE)
1188 kvmppc_set_vmx_byte(vcpu, gpr);
1196 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1197 unsigned int rt, unsigned int bytes,
1198 int is_default_endian, int sign_extend)
1203 /* Pity C doesn't have a logical XOR operator */
1204 if (kvmppc_need_byteswap(vcpu)) {
1205 host_swabbed = is_default_endian;
1207 host_swabbed = !is_default_endian;
1210 if (bytes > sizeof(run->mmio.data)) {
1211 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1215 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1216 run->mmio.len = bytes;
1217 run->mmio.is_write = 0;
1219 vcpu->arch.io_gpr = rt;
1220 vcpu->arch.mmio_host_swabbed = host_swabbed;
1221 vcpu->mmio_needed = 1;
1222 vcpu->mmio_is_write = 0;
1223 vcpu->arch.mmio_sign_extend = sign_extend;
1225 idx = srcu_read_lock(&vcpu->kvm->srcu);
1227 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1228 bytes, &run->mmio.data);
1230 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1233 kvmppc_complete_mmio_load(vcpu, run);
1234 vcpu->mmio_needed = 0;
1235 return EMULATE_DONE;
1238 return EMULATE_DO_MMIO;
1241 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1242 unsigned int rt, unsigned int bytes,
1243 int is_default_endian)
1245 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
1247 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1249 /* Same as above, but sign extends */
1250 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
1251 unsigned int rt, unsigned int bytes,
1252 int is_default_endian)
1254 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
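/*
 * VSX (and VMX) loads and stores may be split across several MMIO
 * transactions: mmio_vsx_copy_nums counts how many remain, paddr_accessed
 * advances after each one, and kvm_arch_vcpu_ioctl_run() resumes the
 * sequence when userspace completes an exit in the middle of it.
 */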
1258 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1259 unsigned int rt, unsigned int bytes,
1260 int is_default_endian, int mmio_sign_extend)
1262 enum emulation_result emulated = EMULATE_DONE;
/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1265 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1266 return EMULATE_FAIL;
1268 while (vcpu->arch.mmio_vsx_copy_nums) {
1269 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1270 is_default_endian, mmio_sign_extend);
1272 if (emulated != EMULATE_DONE)
1275 vcpu->arch.paddr_accessed += run->mmio.len;
1277 vcpu->arch.mmio_vsx_copy_nums--;
1278 vcpu->arch.mmio_vsx_offset++;
1282 #endif /* CONFIG_VSX */
1284 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1285 u64 val, unsigned int bytes, int is_default_endian)
1287 void *data = run->mmio.data;
1291 /* Pity C doesn't have a logical XOR operator */
1292 if (kvmppc_need_byteswap(vcpu)) {
1293 host_swabbed = is_default_endian;
1295 host_swabbed = !is_default_endian;
1298 if (bytes > sizeof(run->mmio.data)) {
1299 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1303 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1304 run->mmio.len = bytes;
1305 run->mmio.is_write = 1;
1306 vcpu->mmio_needed = 1;
1307 vcpu->mmio_is_write = 1;
1309 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1310 val = dp_to_sp(val);
1312 /* Store the value at the lowest bytes in 'data'. */
1313 if (!host_swabbed) {
1315 case 8: *(u64 *)data = val; break;
1316 case 4: *(u32 *)data = val; break;
1317 case 2: *(u16 *)data = val; break;
1318 case 1: *(u8 *)data = val; break;
1322 case 8: *(u64 *)data = swab64(val); break;
1323 case 4: *(u32 *)data = swab32(val); break;
1324 case 2: *(u16 *)data = swab16(val); break;
1325 case 1: *(u8 *)data = val; break;
1329 idx = srcu_read_lock(&vcpu->kvm->srcu);
1331 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1332 bytes, &run->mmio.data);
1334 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1337 vcpu->mmio_needed = 0;
1338 return EMULATE_DONE;
1341 return EMULATE_DO_MMIO;
1343 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1346 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1348 u32 dword_offset, word_offset;
1349 union kvmppc_one_reg reg;
1351 int copy_type = vcpu->arch.mmio_copy_type;
1354 switch (copy_type) {
1355 case KVMPPC_VSX_COPY_DWORD:
1357 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1359 if (vsx_offset == -1) {
1364 if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
1365 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1367 reg.vval = VCPU_VSX_VR(vcpu, rs);
1368 *val = reg.vsxval[vsx_offset];
1372 case KVMPPC_VSX_COPY_WORD:
1374 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1376 if (vsx_offset == -1) {
1381 if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
1382 dword_offset = vsx_offset / 2;
1383 word_offset = vsx_offset % 2;
1384 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1385 *val = reg.vsx32val[word_offset];
1387 reg.vval = VCPU_VSX_VR(vcpu, rs);
1388 *val = reg.vsx32val[vsx_offset];
1400 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1401 int rs, unsigned int bytes, int is_default_endian)
1404 enum emulation_result emulated = EMULATE_DONE;
1406 vcpu->arch.io_gpr = rs;
/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1409 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1410 return EMULATE_FAIL;
1412 while (vcpu->arch.mmio_vsx_copy_nums) {
1413 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1414 return EMULATE_FAIL;
1416 emulated = kvmppc_handle_store(run, vcpu,
1417 val, bytes, is_default_endian);
1419 if (emulated != EMULATE_DONE)
1422 vcpu->arch.paddr_accessed += run->mmio.len;
1424 vcpu->arch.mmio_vsx_copy_nums--;
1425 vcpu->arch.mmio_vsx_offset++;
1431 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1432 struct kvm_run *run)
1434 enum emulation_result emulated = EMULATE_FAIL;
1437 vcpu->arch.paddr_accessed += run->mmio.len;
1439 if (!vcpu->mmio_is_write) {
1440 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1441 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1443 emulated = kvmppc_handle_vsx_store(run, vcpu,
1444 vcpu->arch.io_gpr, run->mmio.len, 1);
1448 case EMULATE_DO_MMIO:
1449 run->exit_reason = KVM_EXIT_MMIO;
1453 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1454 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1455 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1464 #endif /* CONFIG_VSX */
1466 #ifdef CONFIG_ALTIVEC
1467 int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1468 unsigned int rt, unsigned int bytes, int is_default_endian)
1470 enum emulation_result emulated = EMULATE_DONE;
if (vcpu->arch.mmio_vmx_copy_nums > 2)
1473 return EMULATE_FAIL;
1475 while (vcpu->arch.mmio_vmx_copy_nums) {
1476 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1477 is_default_endian, 0);
1479 if (emulated != EMULATE_DONE)
1482 vcpu->arch.paddr_accessed += run->mmio.len;
1483 vcpu->arch.mmio_vmx_copy_nums--;
1484 vcpu->arch.mmio_vmx_offset++;
1490 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1492 union kvmppc_one_reg reg;
1497 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1499 if (vmx_offset == -1)
1502 reg.vval = VCPU_VSX_VR(vcpu, index);
1503 *val = reg.vsxval[vmx_offset];
1508 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1510 union kvmppc_one_reg reg;
1515 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1517 if (vmx_offset == -1)
1520 reg.vval = VCPU_VSX_VR(vcpu, index);
1521 *val = reg.vsx32val[vmx_offset];
1526 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1528 union kvmppc_one_reg reg;
1533 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1535 if (vmx_offset == -1)
1538 reg.vval = VCPU_VSX_VR(vcpu, index);
1539 *val = reg.vsx16val[vmx_offset];
1544 int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1546 union kvmppc_one_reg reg;
1551 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1553 if (vmx_offset == -1)
1556 reg.vval = VCPU_VSX_VR(vcpu, index);
1557 *val = reg.vsx8val[vmx_offset];
1562 int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1563 unsigned int rs, unsigned int bytes, int is_default_endian)
1566 unsigned int index = rs & KVM_MMIO_REG_MASK;
1567 enum emulation_result emulated = EMULATE_DONE;
if (vcpu->arch.mmio_vmx_copy_nums > 2)
1570 return EMULATE_FAIL;
1572 vcpu->arch.io_gpr = rs;
1574 while (vcpu->arch.mmio_vmx_copy_nums) {
1575 switch (vcpu->arch.mmio_copy_type) {
1576 case KVMPPC_VMX_COPY_DWORD:
1577 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1578 return EMULATE_FAIL;
1581 case KVMPPC_VMX_COPY_WORD:
1582 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1583 return EMULATE_FAIL;
1585 case KVMPPC_VMX_COPY_HWORD:
1586 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1587 return EMULATE_FAIL;
1589 case KVMPPC_VMX_COPY_BYTE:
1590 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1591 return EMULATE_FAIL;
1594 return EMULATE_FAIL;
1597 emulated = kvmppc_handle_store(run, vcpu, val, bytes,
1599 if (emulated != EMULATE_DONE)
1602 vcpu->arch.paddr_accessed += run->mmio.len;
1603 vcpu->arch.mmio_vmx_copy_nums--;
1604 vcpu->arch.mmio_vmx_offset++;
1610 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1611 struct kvm_run *run)
1613 enum emulation_result emulated = EMULATE_FAIL;
1616 vcpu->arch.paddr_accessed += run->mmio.len;
1618 if (!vcpu->mmio_is_write) {
1619 emulated = kvmppc_handle_vmx_load(run, vcpu,
1620 vcpu->arch.io_gpr, run->mmio.len, 1);
1622 emulated = kvmppc_handle_vmx_store(run, vcpu,
1623 vcpu->arch.io_gpr, run->mmio.len, 1);
1627 case EMULATE_DO_MMIO:
1628 run->exit_reason = KVM_EXIT_MMIO;
1632 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1633 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1634 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1643 #endif /* CONFIG_ALTIVEC */
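/*
 * The ONE_REG get/set handlers below service the Altivec registers locally
 * and rely on kvmppc_get_one_reg()/kvmppc_set_one_reg() for everything
 * else.
 */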
1645 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1648 union kvmppc_one_reg val;
1651 size = one_reg_size(reg->id);
1652 if (size > sizeof(val))
1655 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1659 #ifdef CONFIG_ALTIVEC
1660 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1661 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1665 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1667 case KVM_REG_PPC_VSCR:
1668 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1672 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1674 case KVM_REG_PPC_VRSAVE:
1675 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1677 #endif /* CONFIG_ALTIVEC */
1687 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1693 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1696 union kvmppc_one_reg val;
1699 size = one_reg_size(reg->id);
1700 if (size > sizeof(val))
1703 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1706 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1710 #ifdef CONFIG_ALTIVEC
1711 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1712 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1716 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1718 case KVM_REG_PPC_VSCR:
1719 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1723 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1725 case KVM_REG_PPC_VRSAVE:
1726 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1730 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1732 #endif /* CONFIG_ALTIVEC */
1742 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1748 if (vcpu->mmio_needed) {
1749 vcpu->mmio_needed = 0;
1750 if (!vcpu->mmio_is_write)
1751 kvmppc_complete_mmio_load(vcpu, run);
1753 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1754 vcpu->arch.mmio_vsx_copy_nums--;
1755 vcpu->arch.mmio_vsx_offset++;
1758 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1759 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1760 if (r == RESUME_HOST) {
1761 vcpu->mmio_needed = 1;
1766 #ifdef CONFIG_ALTIVEC
1767 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1768 vcpu->arch.mmio_vmx_copy_nums--;
1769 vcpu->arch.mmio_vmx_offset++;
1772 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1773 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1774 if (r == RESUME_HOST) {
1775 vcpu->mmio_needed = 1;
1780 } else if (vcpu->arch.osi_needed) {
1781 u64 *gprs = run->osi.gprs;
1784 for (i = 0; i < 32; i++)
1785 kvmppc_set_gpr(vcpu, i, gprs[i]);
1786 vcpu->arch.osi_needed = 0;
1787 } else if (vcpu->arch.hcall_needed) {
1790 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1791 for (i = 0; i < 9; ++i)
1792 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1793 vcpu->arch.hcall_needed = 0;
1795 } else if (vcpu->arch.epr_needed) {
1796 kvmppc_set_epr(vcpu, run->epr.epr);
1797 vcpu->arch.epr_needed = 0;
1801 kvm_sigset_activate(vcpu);
1803 if (run->immediate_exit)
1806 r = kvmppc_vcpu_run(run, vcpu);
1808 kvm_sigset_deactivate(vcpu);
1810 #ifdef CONFIG_ALTIVEC
1817 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1819 if (irq->irq == KVM_INTERRUPT_UNSET) {
1820 kvmppc_core_dequeue_external(vcpu);
1824 kvmppc_core_queue_external(vcpu, irq);
1826 kvm_vcpu_kick(vcpu);
1831 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1832 struct kvm_enable_cap *cap)
1840 case KVM_CAP_PPC_OSI:
1842 vcpu->arch.osi_enabled = true;
1844 case KVM_CAP_PPC_PAPR:
1846 vcpu->arch.papr_enabled = true;
1848 case KVM_CAP_PPC_EPR:
1851 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1853 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1856 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1858 vcpu->arch.watchdog_enabled = true;
1861 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1862 case KVM_CAP_SW_TLB: {
1863 struct kvm_config_tlb cfg;
1864 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1867 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1870 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1874 #ifdef CONFIG_KVM_MPIC
1875 case KVM_CAP_IRQ_MPIC: {
1877 struct kvm_device *dev;
1880 f = fdget(cap->args[0]);
1885 dev = kvm_device_from_filp(f.file);
1887 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1893 #ifdef CONFIG_KVM_XICS
1894 case KVM_CAP_IRQ_XICS: {
1896 struct kvm_device *dev;
1899 f = fdget(cap->args[0]);
1904 dev = kvm_device_from_filp(f.file);
1907 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1909 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1915 #endif /* CONFIG_KVM_XICS */
1916 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1917 case KVM_CAP_PPC_FWNMI:
1919 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1922 vcpu->kvm->arch.fwnmi_enabled = true;
1924 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
1931 r = kvmppc_sanity_check(vcpu);
1936 bool kvm_arch_intc_initialized(struct kvm *kvm)
1938 #ifdef CONFIG_KVM_MPIC
1942 #ifdef CONFIG_KVM_XICS
1943 if (kvm->arch.xics || kvm->arch.xive)
1949 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1950 struct kvm_mp_state *mp_state)
1955 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1956 struct kvm_mp_state *mp_state)
1961 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1962 unsigned int ioctl, unsigned long arg)
1964 struct kvm_vcpu *vcpu = filp->private_data;
1965 void __user *argp = (void __user *)arg;
1967 if (ioctl == KVM_INTERRUPT) {
1968 struct kvm_interrupt irq;
1969 if (copy_from_user(&irq, argp, sizeof(irq)))
1971 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1973 return -ENOIOCTLCMD;
1976 long kvm_arch_vcpu_ioctl(struct file *filp,
1977 unsigned int ioctl, unsigned long arg)
1979 struct kvm_vcpu *vcpu = filp->private_data;
1980 void __user *argp = (void __user *)arg;
1984 case KVM_ENABLE_CAP:
1986 struct kvm_enable_cap cap;
1989 if (copy_from_user(&cap, argp, sizeof(cap)))
1991 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1996 case KVM_SET_ONE_REG:
1997 case KVM_GET_ONE_REG:
1999 struct kvm_one_reg reg;
if (copy_from_user(&reg, argp, sizeof(reg)))
if (ioctl == KVM_SET_ONE_REG)
r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2010 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2011 case KVM_DIRTY_TLB: {
2012 struct kvm_dirty_tlb dirty;
2015 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2017 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2030 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2032 return VM_FAULT_SIGBUS;
2035 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2037 u32 inst_nop = 0x60000000;
2038 #ifdef CONFIG_KVM_BOOKE_HV
2039 u32 inst_sc1 = 0x44000022;
2040 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2041 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2042 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2043 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2045 u32 inst_lis = 0x3c000000;
2046 u32 inst_ori = 0x60000000;
2047 u32 inst_sc = 0x44000002;
2048 u32 inst_imm_mask = 0xffff;
* The hypercall to get into KVM from within guest context is as
* follows:
*
*    lis  r0, KVM_SC_MAGIC_R0@h
*    ori  r0, r0, KVM_SC_MAGIC_R0@l
*    sc
*    nop
2059 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2060 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2061 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2062 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2065 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
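/*
 * Userspace retrieves this block with the KVM_PPC_GET_PVINFO ioctl and is
 * expected to advertise it to the guest (for example via the ePAPR
 * "hcall-instructions" device tree property) so the guest knows how to
 * trap into KVM.
 */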
2070 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2073 if (!irqchip_in_kernel(kvm))
2076 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2077 irq_event->irq, irq_event->level,
2083 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2084 struct kvm_enable_cap *cap)
2092 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2093 case KVM_CAP_PPC_ENABLE_HCALL: {
2094 unsigned long hcall = cap->args[0];
2097 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2100 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2103 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2105 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2109 case KVM_CAP_PPC_SMT: {
2110 unsigned long mode = cap->args[0];
2111 unsigned long flags = cap->args[1];
2114 if (kvm->arch.kvm_ops->set_smt_mode)
2115 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2127 #ifdef CONFIG_PPC_BOOK3S_64
2129 * These functions check whether the underlying hardware is safe
2130 * against attacks based on observing the effects of speculatively
2131 * executed instructions, and whether it supplies instructions for
2132 * use in workarounds. The information comes from firmware, either
2133 * via the device tree on powernv platforms or from an hcall on
2134 * pseries platforms.
2136 #ifdef CONFIG_PPC_PSERIES
2137 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2139 struct h_cpu_char_result c;
2142 if (!machine_is(pseries))
2145 rc = plpar_get_cpu_characteristics(&c);
2146 if (rc == H_SUCCESS) {
2147 cp->character = c.character;
2148 cp->behaviour = c.behaviour;
2149 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2150 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2151 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2152 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2153 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2154 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2155 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2156 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2157 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2158 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2159 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2164 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2170 static inline bool have_fw_feat(struct device_node *fw_features,
2171 const char *state, const char *name)
2173 struct device_node *np;
2176 np = of_get_child_by_name(fw_features, name);
2178 r = of_property_read_bool(np, state);
2184 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2186 struct device_node *np, *fw_features;
2189 memset(cp, 0, sizeof(*cp));
2190 r = pseries_get_cpu_char(cp);
2194 np = of_find_node_by_name(NULL, "ibm,opal");
2196 fw_features = of_get_child_by_name(np, "fw-features");
2200 if (have_fw_feat(fw_features, "enabled",
2201 "inst-spec-barrier-ori31,31,0"))
2202 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2203 if (have_fw_feat(fw_features, "enabled",
2204 "fw-bcctrl-serialized"))
2205 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2206 if (have_fw_feat(fw_features, "enabled",
2207 "inst-l1d-flush-ori30,30,0"))
2208 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2209 if (have_fw_feat(fw_features, "enabled",
2210 "inst-l1d-flush-trig2"))
2211 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2212 if (have_fw_feat(fw_features, "enabled",
2213 "fw-l1d-thread-split"))
2214 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2215 if (have_fw_feat(fw_features, "enabled",
2216 "fw-count-cache-disabled"))
2217 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2218 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2219 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2220 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2221 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2222 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2223 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2225 if (have_fw_feat(fw_features, "enabled",
2226 "speculation-policy-favor-security"))
2227 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2228 if (!have_fw_feat(fw_features, "disabled",
2229 "needs-l1d-flush-msr-pr-0-to-1"))
2230 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2231 if (!have_fw_feat(fw_features, "disabled",
2232 "needs-spec-barrier-for-bound-checks"))
2233 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2234 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2235 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2236 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2238 of_node_put(fw_features);
2245 long kvm_arch_vm_ioctl(struct file *filp,
2246 unsigned int ioctl, unsigned long arg)
2248 struct kvm *kvm __maybe_unused = filp->private_data;
2249 void __user *argp = (void __user *)arg;
2253 case KVM_PPC_GET_PVINFO: {
2254 struct kvm_ppc_pvinfo pvinfo;
2255 memset(&pvinfo, 0, sizeof(pvinfo));
2256 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2257 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2264 case KVM_ENABLE_CAP:
2266 struct kvm_enable_cap cap;
2268 if (copy_from_user(&cap, argp, sizeof(cap)))
2270 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
2273 #ifdef CONFIG_SPAPR_TCE_IOMMU
2274 case KVM_CREATE_SPAPR_TCE_64: {
2275 struct kvm_create_spapr_tce_64 create_tce_64;
2278 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2280 if (create_tce_64.flags) {
2284 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2287 case KVM_CREATE_SPAPR_TCE: {
2288 struct kvm_create_spapr_tce create_tce;
2289 struct kvm_create_spapr_tce_64 create_tce_64;
2292 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2295 create_tce_64.liobn = create_tce.liobn;
2296 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2297 create_tce_64.offset = 0;
2298 create_tce_64.size = create_tce.window_size >>
2299 IOMMU_PAGE_SHIFT_4K;
2300 create_tce_64.flags = 0;
2301 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2305 #ifdef CONFIG_PPC_BOOK3S_64
2306 case KVM_PPC_GET_SMMU_INFO: {
2307 struct kvm_ppc_smmu_info info;
2308 struct kvm *kvm = filp->private_data;
2310 memset(&info, 0, sizeof(info));
2311 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2312 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2316 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2317 struct kvm *kvm = filp->private_data;
2319 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2322 case KVM_PPC_CONFIGURE_V3_MMU: {
2323 struct kvm *kvm = filp->private_data;
2324 struct kvm_ppc_mmuv3_cfg cfg;
2327 if (!kvm->arch.kvm_ops->configure_mmu)
2330 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2332 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2335 case KVM_PPC_GET_RMMU_INFO: {
2336 struct kvm *kvm = filp->private_data;
2337 struct kvm_ppc_rmmu_info info;
2340 if (!kvm->arch.kvm_ops->get_rmmu_info)
2342 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2343 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2347 case KVM_PPC_GET_CPU_CHAR: {
2348 struct kvm_ppc_cpu_char cpuchar;
2350 r = kvmppc_get_cpu_char(&cpuchar);
2351 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2356 struct kvm *kvm = filp->private_data;
2357 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2359 #else /* CONFIG_PPC_BOOK3S_64 */
2368 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2369 static unsigned long nr_lpids;
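/*
 * Minimal LPID allocator: a bitmap of in-use LPIDs, claimed with an atomic
 * test_and_set_bit() retry loop.
 */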
2371 long kvmppc_alloc_lpid(void)
2376 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2377 if (lpid >= nr_lpids) {
2378 pr_err("%s: No LPIDs free\n", __func__);
2381 } while (test_and_set_bit(lpid, lpid_inuse));
2385 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2387 void kvmppc_claim_lpid(long lpid)
2389 set_bit(lpid, lpid_inuse);
2391 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2393 void kvmppc_free_lpid(long lpid)
2395 clear_bit(lpid, lpid_inuse);
2397 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2399 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2401 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2402 memset(lpid_inuse, 0, sizeof(lpid_inuse));
2404 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2406 int kvm_arch_init(void *opaque)
2411 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);