1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright IBM Corp. 2007
6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <asm/cputable.h>
23 #include <linux/uaccess.h>
24 #include <asm/kvm_ppc.h>
25 #include <asm/cputhreads.h>
26 #include <asm/irqflags.h>
27 #include <asm/iommu.h>
28 #include <asm/switch_to.h>
30 #ifdef CONFIG_PPC_PSERIES
31 #include <asm/hvcall.h>
32 #include <asm/plpar_wrappers.h>
34 #include <asm/ultravisor.h>
35 #include <asm/kvm_host.h>
39 #include "../mm/mmu_decl.h"
41 #define CREATE_TRACE_POINTS
44 struct kvmppc_ops *kvmppc_hv_ops;
45 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
46 struct kvmppc_ops *kvmppc_pr_ops;
47 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
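/*
 * These ops pointers are populated by the backend implementation modules
 * when they initialize; kvm_arch_init_vm() below selects one of them for
 * each VM based on the requested VM type.
 */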
50 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
55 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
57 return kvm_arch_vcpu_runnable(vcpu);
60 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
65 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
71 * Common checks before entering the guest world. Call with interrupts
76 * == 1 if we're ready to go into guest state
77 * <= 0 if we need to go back to the host with return value
79 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
83 WARN_ON(irqs_disabled());
94 if (signal_pending(current)) {
95 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
96 vcpu->run->exit_reason = KVM_EXIT_INTR;
101 vcpu->mode = IN_GUEST_MODE;
104 * Reading vcpu->requests must happen after setting vcpu->mode,
105 * so we don't miss a request because the requester sees
106 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
107 * before next entering the guest (and thus doesn't IPI).
108 * This also orders the write to mode from any reads
109 * to the page tables done while the VCPU is running.
110 * Please see the comment in kvm_flush_remote_tlbs.
114 if (kvm_request_pending(vcpu)) {
115 /* Make sure we process requests in a preemptible context */
117 trace_kvm_check_requests(vcpu);
118 r = kvmppc_core_check_requests(vcpu);
125 if (kvmppc_core_prepare_to_enter(vcpu)) {
126 /* interrupts got enabled in between, so we
127 are back at square 1 */
131 guest_enter_irqoff();
139 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
141 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
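/*
 * kvmppc_swab_shared() byte-swaps every field of the shared (magic) page.
 * The page is kept in the guest's current endianness; when that stops
 * matching what we last recorded, kvmppc_kvm_pv() calls this helper so the
 * guest keeps seeing consistent values.
 */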
142 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
147 shared->sprg0 = swab64(shared->sprg0);
148 shared->sprg1 = swab64(shared->sprg1);
149 shared->sprg2 = swab64(shared->sprg2);
150 shared->sprg3 = swab64(shared->sprg3);
151 shared->srr0 = swab64(shared->srr0);
152 shared->srr1 = swab64(shared->srr1);
153 shared->dar = swab64(shared->dar);
154 shared->msr = swab64(shared->msr);
155 shared->dsisr = swab32(shared->dsisr);
156 shared->int_pending = swab32(shared->int_pending);
157 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
158 shared->sr[i] = swab32(shared->sr[i]);
162 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
164 int nr = kvmppc_get_gpr(vcpu, 11);
166 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
167 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
168 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
169 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
170 unsigned long r2 = 0;
172 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
174 param1 &= 0xffffffff;
175 param2 &= 0xffffffff;
176 param3 &= 0xffffffff;
177 param4 &= 0xffffffff;
181 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
183 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
184 /* Book3S can be little endian, find it out here */
185 int shared_big_endian = true;
186 if (vcpu->arch.intr_msr & MSR_LE)
187 shared_big_endian = false;
188 if (shared_big_endian != vcpu->arch.shared_big_endian)
189 kvmppc_swab_shared(vcpu);
190 vcpu->arch.shared_big_endian = shared_big_endian;
193 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
195 * Older versions of the Linux magic page code had
196 * a bug where they would map their trampoline code
197 * NX. If that's the case, remove !PR NX capability.
199 vcpu->arch.disable_kernel_nx = true;
200 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
206 #ifdef CONFIG_PPC_64K_PAGES
208 * Make sure our 4k magic page sits at the same 4k offset within a 64k
209 * page in both the guest and the host.
211 if ((vcpu->arch.magic_page_pa & 0xf000) !=
212 ((ulong)vcpu->arch.shared & 0xf000)) {
213 void *old_shared = vcpu->arch.shared;
214 ulong shared = (ulong)vcpu->arch.shared;
218 shared |= vcpu->arch.magic_page_pa & 0xf000;
219 new_shared = (void *)shared;
220 memcpy(new_shared, old_shared, 0x1000);
221 vcpu->arch.shared = new_shared;
225 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
230 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
232 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
233 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
236 /* Second return value is in r4 */
238 case EV_HCALL_TOKEN(EV_IDLE):
240 kvm_vcpu_block(vcpu);
241 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
244 r = EV_UNIMPLEMENTED;
248 kvmppc_set_gpr(vcpu, 4, r2);
252 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
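/*
 * Sanity-check the vcpu configuration before it is allowed to run: the CPU
 * type to virtualize must be known, PAPR mode is only valid on Book3S-64,
 * HV KVM currently requires PAPR mode, and Book E HV needs the
 * CPU_FTR_EMB_HV feature.
 */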
254 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
258 /* We have to know what CPU to virtualize */
262 /* PAPR only works with book3s_64 */
263 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
266 /* HV KVM can only do PAPR mode for now */
267 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
270 #ifdef CONFIG_KVM_BOOKE_HV
271 if (!cpu_has_feature(CPU_FTR_EMB_HV))
279 return r ? 0 : -EINVAL;
281 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
283 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
285 enum emulation_result er;
288 er = kvmppc_emulate_loadstore(vcpu);
291 /* Future optimization: only reload non-volatiles if they were
292 * actually modified. */
298 case EMULATE_DO_MMIO:
299 run->exit_reason = KVM_EXIT_MMIO;
300 /* We must reload nonvolatiles because "update" load/store
301 * instructions modify register state. */
302 /* Future optimization: only reload non-volatiles if they were
303 * actually modified. */
310 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
311 /* XXX Deliver Program interrupt to guest. */
312 pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
323 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
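/*
 * kvmppc_st() and kvmppc_ld() store to / load from a guest effective
 * address: they first try the backend's fast path (store_to_eaddr /
 * load_from_eaddr), then translate the address with kvmppc_xlate(), honour
 * the magic-page override, and finally fall back to kvm_write_guest() /
 * kvm_read_guest(); EMULATE_DO_MMIO is returned when the address is not
 * backed by guest memory.
 */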
325 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
328 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
329 struct kvmppc_pte pte;
334 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
335 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
338 if ((!r) || (r == -EAGAIN))
341 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
351 /* Magic page override */
352 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
353 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
354 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
355 void *magic = vcpu->arch.shared;
356 magic += pte.eaddr & 0xfff;
357 memcpy(magic, ptr, size);
361 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
362 return EMULATE_DO_MMIO;
366 EXPORT_SYMBOL_GPL(kvmppc_st);
368 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
371 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
372 struct kvmppc_pte pte;
377 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
378 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
381 if ((!rc) || (rc == -EAGAIN))
384 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
394 if (!data && !pte.may_execute)
397 /* Magic page override */
398 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
399 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
400 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
401 void *magic = vcpu->arch.shared;
402 magic += pte.eaddr & 0xfff;
403 memcpy(ptr, magic, size);
407 if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
408 return EMULATE_DO_MMIO;
412 EXPORT_SYMBOL_GPL(kvmppc_ld);
414 int kvm_arch_hardware_enable(void)
419 int kvm_arch_hardware_setup(void)
424 int kvm_arch_check_processor_compat(void)
426 return kvmppc_core_check_processor_compat();
429 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
431 struct kvmppc_ops *kvm_ops = NULL;
433 * If we have both HV and PR enabled, the default is HV
437 kvm_ops = kvmppc_hv_ops;
439 kvm_ops = kvmppc_pr_ops;
442 } else if (type == KVM_VM_PPC_HV) {
445 kvm_ops = kvmppc_hv_ops;
446 } else if (type == KVM_VM_PPC_PR) {
449 kvm_ops = kvmppc_pr_ops;
453 if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
456 kvm->arch.kvm_ops = kvm_ops;
457 return kvmppc_core_init_vm(kvm);
462 void kvm_arch_destroy_vm(struct kvm *kvm)
465 struct kvm_vcpu *vcpu;
467 #ifdef CONFIG_KVM_XICS
469 * We call kick_all_cpus_sync() to ensure that all
470 * CPUs have executed any pending IPIs before we
471 * continue and free VCPUs structures below.
473 if (is_kvmppc_hv_enabled(kvm))
474 kick_all_cpus_sync();
477 kvm_for_each_vcpu(i, vcpu, kvm)
478 kvm_arch_vcpu_free(vcpu);
480 mutex_lock(&kvm->lock);
481 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
482 kvm->vcpus[i] = NULL;
484 atomic_set(&kvm->online_vcpus, 0);
486 kvmppc_core_destroy_vm(kvm);
488 mutex_unlock(&kvm->lock);
490 /* drop the module reference */
491 module_put(kvm->arch.kvm_ops->owner);
494 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
497 /* Assume we're using HV mode when the HV module is loaded */
498 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
502 * Hooray - we know which VM type we're running on. Depend on
503 * that rather than the guess above.
505 hv_enabled = is_kvmppc_hv_enabled(kvm);
510 case KVM_CAP_PPC_BOOKE_SREGS:
511 case KVM_CAP_PPC_BOOKE_WATCHDOG:
512 case KVM_CAP_PPC_EPR:
514 case KVM_CAP_PPC_SEGSTATE:
515 case KVM_CAP_PPC_HIOR:
516 case KVM_CAP_PPC_PAPR:
518 case KVM_CAP_PPC_UNSET_IRQ:
519 case KVM_CAP_PPC_IRQ_LEVEL:
520 case KVM_CAP_ENABLE_CAP:
521 case KVM_CAP_ONE_REG:
522 case KVM_CAP_IOEVENTFD:
523 case KVM_CAP_DEVICE_CTRL:
524 case KVM_CAP_IMMEDIATE_EXIT:
527 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
529 case KVM_CAP_PPC_PAIRED_SINGLES:
530 case KVM_CAP_PPC_OSI:
531 case KVM_CAP_PPC_GET_PVINFO:
532 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
535 /* We support this only for PR */
538 #ifdef CONFIG_KVM_MPIC
539 case KVM_CAP_IRQ_MPIC:
544 #ifdef CONFIG_PPC_BOOK3S_64
545 case KVM_CAP_SPAPR_TCE:
546 case KVM_CAP_SPAPR_TCE_64:
549 case KVM_CAP_SPAPR_TCE_VFIO:
550 r = !!cpu_has_feature(CPU_FTR_HVMODE);
552 case KVM_CAP_PPC_RTAS:
553 case KVM_CAP_PPC_FIXUP_HCALL:
554 case KVM_CAP_PPC_ENABLE_HCALL:
555 #ifdef CONFIG_KVM_XICS
556 case KVM_CAP_IRQ_XICS:
558 case KVM_CAP_PPC_GET_CPU_CHAR:
561 #ifdef CONFIG_KVM_XIVE
562 case KVM_CAP_PPC_IRQ_XIVE:
564 * We need XIVE to be enabled on the platform (implies
565 * a POWER9 processor) and the PowerNV platform, as
566 * nested is not yet supported.
568 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
569 kvmppc_xive_native_supported();
573 case KVM_CAP_PPC_ALLOC_HTAB:
576 #endif /* CONFIG_PPC_BOOK3S_64 */
577 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
578 case KVM_CAP_PPC_SMT:
581 if (kvm->arch.emul_smt_mode > 1)
582 r = kvm->arch.emul_smt_mode;
584 r = kvm->arch.smt_mode;
585 } else if (hv_enabled) {
586 if (cpu_has_feature(CPU_FTR_ARCH_300))
589 r = threads_per_subcore;
592 case KVM_CAP_PPC_SMT_POSSIBLE:
595 if (!cpu_has_feature(CPU_FTR_ARCH_300))
596 r = ((threads_per_subcore << 1) - 1);
598 /* P9 can emulate doorbells, so allow any mode */
602 case KVM_CAP_PPC_RMA:
605 case KVM_CAP_PPC_HWRNG:
606 r = kvmppc_hwrng_present();
608 case KVM_CAP_PPC_MMU_RADIX:
609 r = !!(hv_enabled && radix_enabled());
611 case KVM_CAP_PPC_MMU_HASH_V3:
612 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
613 cpu_has_feature(CPU_FTR_HVMODE));
615 case KVM_CAP_PPC_NESTED_HV:
616 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
617 !kvmppc_hv_ops->enable_nested(NULL));
620 case KVM_CAP_SYNC_MMU:
621 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
623 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
629 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
630 case KVM_CAP_PPC_HTAB_FD:
634 case KVM_CAP_NR_VCPUS:
636 * Recommending a number of CPUs is somewhat arbitrary; we
637 * return the number of present CPUs for -HV (since a host
638 * will have secondary threads "offline"), and for other KVM
639 * implementations just count online CPUs.
642 r = num_present_cpus();
644 r = num_online_cpus();
646 case KVM_CAP_MAX_VCPUS:
649 case KVM_CAP_MAX_VCPU_ID:
652 #ifdef CONFIG_PPC_BOOK3S_64
653 case KVM_CAP_PPC_GET_SMMU_INFO:
656 case KVM_CAP_SPAPR_MULTITCE:
659 case KVM_CAP_SPAPR_RESIZE_HPT:
663 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
664 case KVM_CAP_PPC_FWNMI:
668 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
669 case KVM_CAP_PPC_HTM:
670 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
671 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
682 long kvm_arch_dev_ioctl(struct file *filp,
683 unsigned int ioctl, unsigned long arg)
688 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
689 struct kvm_memory_slot *dont)
691 kvmppc_core_free_memslot(kvm, free, dont);
694 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
695 unsigned long npages)
697 return kvmppc_core_create_memslot(kvm, slot, npages);
700 int kvm_arch_prepare_memory_region(struct kvm *kvm,
701 struct kvm_memory_slot *memslot,
702 const struct kvm_userspace_memory_region *mem,
703 enum kvm_mr_change change)
705 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
708 void kvm_arch_commit_memory_region(struct kvm *kvm,
709 const struct kvm_userspace_memory_region *mem,
710 const struct kvm_memory_slot *old,
711 const struct kvm_memory_slot *new,
712 enum kvm_mr_change change)
714 kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
717 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
718 struct kvm_memory_slot *slot)
720 kvmppc_core_flush_memslot(kvm, slot);
723 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
725 struct kvm_vcpu *vcpu;
728 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
730 return ERR_PTR(-ENOMEM);
732 err = kvm_vcpu_init(vcpu, kvm, id);
736 err = kvmppc_core_vcpu_create(vcpu);
740 vcpu->arch.wqp = &vcpu->wq;
741 kvmppc_create_vcpu_debugfs(vcpu, id);
745 kvm_vcpu_uninit(vcpu);
747 kmem_cache_free(kvm_vcpu_cache, vcpu);
751 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
755 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
757 /* Make sure we're not using the vcpu anymore */
758 hrtimer_cancel(&vcpu->arch.dec_timer);
760 kvmppc_remove_vcpu_debugfs(vcpu);
762 switch (vcpu->arch.irq_type) {
763 case KVMPPC_IRQ_MPIC:
764 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
766 case KVMPPC_IRQ_XICS:
768 kvmppc_xive_cleanup_vcpu(vcpu);
770 kvmppc_xics_free_icp(vcpu);
772 case KVMPPC_IRQ_XIVE:
773 kvmppc_xive_native_cleanup_vcpu(vcpu);
777 kvmppc_core_vcpu_free(vcpu);
779 kvm_vcpu_uninit(vcpu);
781 kmem_cache_free(kvm_vcpu_cache, vcpu);
784 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
786 kvm_arch_vcpu_free(vcpu);
789 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
791 return kvmppc_core_pending_dec(vcpu);
794 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
796 struct kvm_vcpu *vcpu;
798 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
799 kvmppc_decrementer_func(vcpu);
801 return HRTIMER_NORESTART;
804 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
808 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
809 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
810 vcpu->arch.dec_expires = get_tb();
812 #ifdef CONFIG_KVM_EXIT_TIMING
813 mutex_init(&vcpu->arch.exit_timing_lock);
815 ret = kvmppc_subarch_vcpu_init(vcpu);
819 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
821 kvmppc_mmu_destroy(vcpu);
822 kvmppc_subarch_vcpu_uninit(vcpu);
825 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
829 * vrsave (formerly usprg0) isn't used by Linux, but may
830 * be used by the guest.
832 * On non-booke this is associated with Altivec and
833 * is handled by code in book3s.c.
835 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
837 kvmppc_core_vcpu_load(vcpu, cpu);
840 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
842 kvmppc_core_vcpu_put(vcpu);
844 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
849 * irq_bypass_add_producer and irq_bypass_del_producer are only
850 * useful if the architecture supports PCI passthrough.
851 * irq_bypass_stop and irq_bypass_start are not needed and so
852 * kvm_ops are not defined for them.
854 bool kvm_arch_has_irq_bypass(void)
856 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
857 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
860 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
861 struct irq_bypass_producer *prod)
863 struct kvm_kernel_irqfd *irqfd =
864 container_of(cons, struct kvm_kernel_irqfd, consumer);
865 struct kvm *kvm = irqfd->kvm;
867 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
868 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
873 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
874 struct irq_bypass_producer *prod)
876 struct kvm_kernel_irqfd *irqfd =
877 container_of(cons, struct kvm_kernel_irqfd, consumer);
878 struct kvm *kvm = irqfd->kvm;
880 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
881 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
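/*
 * VSX MMIO helpers.  VSRs 0-31 overlay the FP registers (each FPR holds the
 * most-significant doubleword of the matching VSR) and VSRs 32-63 overlay
 * the Altivec VRs; hence the code below addresses registers 32 and up as
 * VCPU_VSX_VR(vcpu, index - 32) and registers 0-31 through VCPU_VSX_FPR().
 */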
885 static inline int kvmppc_get_vsr_dword_offset(int index)
889 if ((index != 0) && (index != 1))
901 static inline int kvmppc_get_vsr_word_offset(int index)
905 if ((index > 3) || (index < 0))
916 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
919 union kvmppc_one_reg val;
920 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
921 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
927 val.vval = VCPU_VSX_VR(vcpu, index - 32);
928 val.vsxval[offset] = gpr;
929 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
931 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
935 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
938 union kvmppc_one_reg val;
939 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
942 val.vval = VCPU_VSX_VR(vcpu, index - 32);
945 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
947 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
948 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
952 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
955 union kvmppc_one_reg val;
956 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
959 val.vsx32val[0] = gpr;
960 val.vsx32val[1] = gpr;
961 val.vsx32val[2] = gpr;
962 val.vsx32val[3] = gpr;
963 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
965 val.vsx32val[0] = gpr;
966 val.vsx32val[1] = gpr;
967 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
968 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
972 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
975 union kvmppc_one_reg val;
976 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
977 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
978 int dword_offset, word_offset;
984 val.vval = VCPU_VSX_VR(vcpu, index - 32);
985 val.vsx32val[offset] = gpr32;
986 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
988 dword_offset = offset / 2;
989 word_offset = offset % 2;
990 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
991 val.vsx32val[word_offset] = gpr32;
992 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
995 #endif /* CONFIG_VSX */
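/*
 * Altivec (VMX) MMIO helpers.  A vector register is accessed element by
 * element; kvmppc_get_vmx_offset_generic() turns an element index into the
 * element's offset within the vector, mirroring the index when the guest
 * runs with the opposite byte order (kvmppc_need_byteswap()).
 */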
997 #ifdef CONFIG_ALTIVEC
998 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
999 int index, int element_size)
1002 int elts = sizeof(vector128)/element_size;
1004 if ((index < 0) || (index >= elts))
1007 if (kvmppc_need_byteswap(vcpu))
1008 offset = elts - index - 1;
1015 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1018 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1021 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1024 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1027 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1030 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1033 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1036 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1040 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1043 union kvmppc_one_reg val;
1044 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1045 vcpu->arch.mmio_vmx_offset);
1046 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1051 val.vval = VCPU_VSX_VR(vcpu, index);
1052 val.vsxval[offset] = gpr;
1053 VCPU_VSX_VR(vcpu, index) = val.vval;
1056 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1059 union kvmppc_one_reg val;
1060 int offset = kvmppc_get_vmx_word_offset(vcpu,
1061 vcpu->arch.mmio_vmx_offset);
1062 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1067 val.vval = VCPU_VSX_VR(vcpu, index);
1068 val.vsx32val[offset] = gpr32;
1069 VCPU_VSX_VR(vcpu, index) = val.vval;
1072 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1075 union kvmppc_one_reg val;
1076 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1077 vcpu->arch.mmio_vmx_offset);
1078 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1083 val.vval = VCPU_VSX_VR(vcpu, index);
1084 val.vsx16val[offset] = gpr16;
1085 VCPU_VSX_VR(vcpu, index) = val.vval;
1088 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1091 union kvmppc_one_reg val;
1092 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1093 vcpu->arch.mmio_vmx_offset);
1094 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1099 val.vval = VCPU_VSX_VR(vcpu, index);
1100 val.vsx8val[offset] = gpr8;
1101 VCPU_VSX_VR(vcpu, index) = val.vval;
1103 #endif /* CONFIG_ALTIVEC */
1105 #ifdef CONFIG_PPC_FPU
1106 static inline u64 sp_to_dp(u32 fprs)
1112 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
1118 static inline u32 dp_to_sp(u64 fprd)
1124 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
1131 #define sp_to_dp(x) (x)
1132 #define dp_to_sp(x) (x)
1133 #endif /* CONFIG_PPC_FPU */
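/*
 * sp_to_dp()/dp_to_sp() rely on the FPU to convert between single and
 * double precision via a load/store pair (lfs/stfd and lfd/stfs); when
 * CONFIG_PPC_FPU is not set they degenerate to identity macros and
 * single-precision MMIO accesses are passed through unconverted.
 */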
1135 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
1136 struct kvm_run *run)
1138 u64 uninitialized_var(gpr);
1140 if (run->mmio.len > sizeof(gpr)) {
1141 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1145 if (!vcpu->arch.mmio_host_swabbed) {
1146 switch (run->mmio.len) {
1147 case 8: gpr = *(u64 *)run->mmio.data; break;
1148 case 4: gpr = *(u32 *)run->mmio.data; break;
1149 case 2: gpr = *(u16 *)run->mmio.data; break;
1150 case 1: gpr = *(u8 *)run->mmio.data; break;
1153 switch (run->mmio.len) {
1154 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1155 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1156 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1157 case 1: gpr = *(u8 *)run->mmio.data; break;
1161 /* conversion between single and double precision */
1162 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1163 gpr = sp_to_dp(gpr);
1165 if (vcpu->arch.mmio_sign_extend) {
1166 switch (run->mmio.len) {
1169 gpr = (s64)(s32)gpr;
1173 gpr = (s64)(s16)gpr;
1181 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1182 case KVM_MMIO_REG_GPR:
1183 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1185 case KVM_MMIO_REG_FPR:
1186 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1187 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1189 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1191 #ifdef CONFIG_PPC_BOOK3S
1192 case KVM_MMIO_REG_QPR:
1193 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1195 case KVM_MMIO_REG_FQPR:
1196 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1197 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1201 case KVM_MMIO_REG_VSX:
1202 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1203 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1205 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1206 kvmppc_set_vsr_dword(vcpu, gpr);
1207 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1208 kvmppc_set_vsr_word(vcpu, gpr);
1209 else if (vcpu->arch.mmio_copy_type ==
1210 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1211 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1212 else if (vcpu->arch.mmio_copy_type ==
1213 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1214 kvmppc_set_vsr_word_dump(vcpu, gpr);
1217 #ifdef CONFIG_ALTIVEC
1218 case KVM_MMIO_REG_VMX:
1219 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1220 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1222 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1223 kvmppc_set_vmx_dword(vcpu, gpr);
1224 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1225 kvmppc_set_vmx_word(vcpu, gpr);
1226 else if (vcpu->arch.mmio_copy_type ==
1227 KVMPPC_VMX_COPY_HWORD)
1228 kvmppc_set_vmx_hword(vcpu, gpr);
1229 else if (vcpu->arch.mmio_copy_type ==
1230 KVMPPC_VMX_COPY_BYTE)
1231 kvmppc_set_vmx_byte(vcpu, gpr);
1234 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1235 case KVM_MMIO_REG_NESTED_GPR:
1236 if (kvmppc_need_byteswap(vcpu))
1238 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1247 static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1248 unsigned int rt, unsigned int bytes,
1249 int is_default_endian, int sign_extend)
1254 /* Pity C doesn't have a logical XOR operator */
1255 if (kvmppc_need_byteswap(vcpu)) {
1256 host_swabbed = is_default_endian;
1258 host_swabbed = !is_default_endian;
1261 if (bytes > sizeof(run->mmio.data)) {
1262 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1266 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1267 run->mmio.len = bytes;
1268 run->mmio.is_write = 0;
1270 vcpu->arch.io_gpr = rt;
1271 vcpu->arch.mmio_host_swabbed = host_swabbed;
1272 vcpu->mmio_needed = 1;
1273 vcpu->mmio_is_write = 0;
1274 vcpu->arch.mmio_sign_extend = sign_extend;
1276 idx = srcu_read_lock(&vcpu->kvm->srcu);
1278 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1279 bytes, &run->mmio.data);
1281 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1284 kvmppc_complete_mmio_load(vcpu, run);
1285 vcpu->mmio_needed = 0;
1286 return EMULATE_DONE;
1289 return EMULATE_DO_MMIO;
1292 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1293 unsigned int rt, unsigned int bytes,
1294 int is_default_endian)
1296 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
1298 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1300 /* Same as above, but sign extends */
1301 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
1302 unsigned int rt, unsigned int bytes,
1303 int is_default_endian)
1305 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
1309 int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1310 unsigned int rt, unsigned int bytes,
1311 int is_default_endian, int mmio_sign_extend)
1313 enum emulation_result emulated = EMULATE_DONE;
1315 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1316 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1317 return EMULATE_FAIL;
1319 while (vcpu->arch.mmio_vsx_copy_nums) {
1320 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1321 is_default_endian, mmio_sign_extend);
1323 if (emulated != EMULATE_DONE)
1326 vcpu->arch.paddr_accessed += run->mmio.len;
1328 vcpu->arch.mmio_vsx_copy_nums--;
1329 vcpu->arch.mmio_vsx_offset++;
1333 #endif /* CONFIG_VSX */
1335 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1336 u64 val, unsigned int bytes, int is_default_endian)
1338 void *data = run->mmio.data;
1342 /* Pity C doesn't have a logical XOR operator */
1343 if (kvmppc_need_byteswap(vcpu)) {
1344 host_swabbed = is_default_endian;
1346 host_swabbed = !is_default_endian;
1349 if (bytes > sizeof(run->mmio.data)) {
1350 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1354 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1355 run->mmio.len = bytes;
1356 run->mmio.is_write = 1;
1357 vcpu->mmio_needed = 1;
1358 vcpu->mmio_is_write = 1;
1360 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1361 val = dp_to_sp(val);
1363 /* Store the value in the lowest bytes of 'data'. */
1364 if (!host_swabbed) {
1366 case 8: *(u64 *)data = val; break;
1367 case 4: *(u32 *)data = val; break;
1368 case 2: *(u16 *)data = val; break;
1369 case 1: *(u8 *)data = val; break;
1373 case 8: *(u64 *)data = swab64(val); break;
1374 case 4: *(u32 *)data = swab32(val); break;
1375 case 2: *(u16 *)data = swab16(val); break;
1376 case 1: *(u8 *)data = val; break;
1380 idx = srcu_read_lock(&vcpu->kvm->srcu);
1382 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1383 bytes, &run->mmio.data);
1385 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1388 vcpu->mmio_needed = 0;
1389 return EMULATE_DONE;
1392 return EMULATE_DO_MMIO;
1394 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
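/*
 * For VSX stores, kvmppc_get_vsr_data() extracts the doubleword or word
 * selected by mmio_vsx_offset from source register 'rs', taking the FPR/VR
 * overlap into account, so that kvmppc_handle_vsx_store() can emit one
 * kvmppc_handle_store() per copy.
 */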
1397 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1399 u32 dword_offset, word_offset;
1400 union kvmppc_one_reg reg;
1402 int copy_type = vcpu->arch.mmio_copy_type;
1405 switch (copy_type) {
1406 case KVMPPC_VSX_COPY_DWORD:
1408 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1410 if (vsx_offset == -1) {
1416 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1418 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1419 *val = reg.vsxval[vsx_offset];
1423 case KVMPPC_VSX_COPY_WORD:
1425 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1427 if (vsx_offset == -1) {
1433 dword_offset = vsx_offset / 2;
1434 word_offset = vsx_offset % 2;
1435 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1436 *val = reg.vsx32val[word_offset];
1438 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1439 *val = reg.vsx32val[vsx_offset];
1451 int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1452 int rs, unsigned int bytes, int is_default_endian)
1455 enum emulation_result emulated = EMULATE_DONE;
1457 vcpu->arch.io_gpr = rs;
1459 /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1460 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1461 return EMULATE_FAIL;
1463 while (vcpu->arch.mmio_vsx_copy_nums) {
1464 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1465 return EMULATE_FAIL;
1467 emulated = kvmppc_handle_store(run, vcpu,
1468 val, bytes, is_default_endian);
1470 if (emulated != EMULATE_DONE)
1473 vcpu->arch.paddr_accessed += run->mmio.len;
1475 vcpu->arch.mmio_vsx_copy_nums--;
1476 vcpu->arch.mmio_vsx_offset++;
1482 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1483 struct kvm_run *run)
1485 enum emulation_result emulated = EMULATE_FAIL;
1488 vcpu->arch.paddr_accessed += run->mmio.len;
1490 if (!vcpu->mmio_is_write) {
1491 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1492 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1494 emulated = kvmppc_handle_vsx_store(run, vcpu,
1495 vcpu->arch.io_gpr, run->mmio.len, 1);
1499 case EMULATE_DO_MMIO:
1500 run->exit_reason = KVM_EXIT_MMIO;
1504 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1505 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1506 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1515 #endif /* CONFIG_VSX */
1517 #ifdef CONFIG_ALTIVEC
1518 int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1519 unsigned int rt, unsigned int bytes, int is_default_endian)
1521 enum emulation_result emulated = EMULATE_DONE;
1523 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1524 return EMULATE_FAIL;
1526 while (vcpu->arch.mmio_vmx_copy_nums) {
1527 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1528 is_default_endian, 0);
1530 if (emulated != EMULATE_DONE)
1533 vcpu->arch.paddr_accessed += run->mmio.len;
1534 vcpu->arch.mmio_vmx_copy_nums--;
1535 vcpu->arch.mmio_vmx_offset++;
1541 int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1543 union kvmppc_one_reg reg;
1548 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1550 if (vmx_offset == -1)
1553 reg.vval = VCPU_VSX_VR(vcpu, index);
1554 *val = reg.vsxval[vmx_offset];
1559 int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1561 union kvmppc_one_reg reg;
1566 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1568 if (vmx_offset == -1)
1571 reg.vval = VCPU_VSX_VR(vcpu, index);
1572 *val = reg.vsx32val[vmx_offset];
1577 int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1579 union kvmppc_one_reg reg;
1584 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1586 if (vmx_offset == -1)
1589 reg.vval = VCPU_VSX_VR(vcpu, index);
1590 *val = reg.vsx16val[vmx_offset];
1595 int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1597 union kvmppc_one_reg reg;
1602 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1604 if (vmx_offset == -1)
1607 reg.vval = VCPU_VSX_VR(vcpu, index);
1608 *val = reg.vsx8val[vmx_offset];
1613 int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1614 unsigned int rs, unsigned int bytes, int is_default_endian)
1617 unsigned int index = rs & KVM_MMIO_REG_MASK;
1618 enum emulation_result emulated = EMULATE_DONE;
1620 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1621 return EMULATE_FAIL;
1623 vcpu->arch.io_gpr = rs;
1625 while (vcpu->arch.mmio_vmx_copy_nums) {
1626 switch (vcpu->arch.mmio_copy_type) {
1627 case KVMPPC_VMX_COPY_DWORD:
1628 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1629 return EMULATE_FAIL;
1632 case KVMPPC_VMX_COPY_WORD:
1633 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1634 return EMULATE_FAIL;
1636 case KVMPPC_VMX_COPY_HWORD:
1637 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1638 return EMULATE_FAIL;
1640 case KVMPPC_VMX_COPY_BYTE:
1641 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1642 return EMULATE_FAIL;
1645 return EMULATE_FAIL;
1648 emulated = kvmppc_handle_store(run, vcpu, val, bytes,
1650 if (emulated != EMULATE_DONE)
1653 vcpu->arch.paddr_accessed += run->mmio.len;
1654 vcpu->arch.mmio_vmx_copy_nums--;
1655 vcpu->arch.mmio_vmx_offset++;
1661 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1662 struct kvm_run *run)
1664 enum emulation_result emulated = EMULATE_FAIL;
1667 vcpu->arch.paddr_accessed += run->mmio.len;
1669 if (!vcpu->mmio_is_write) {
1670 emulated = kvmppc_handle_vmx_load(run, vcpu,
1671 vcpu->arch.io_gpr, run->mmio.len, 1);
1673 emulated = kvmppc_handle_vmx_store(run, vcpu,
1674 vcpu->arch.io_gpr, run->mmio.len, 1);
1678 case EMULATE_DO_MMIO:
1679 run->exit_reason = KVM_EXIT_MMIO;
1683 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1684 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1685 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1694 #endif /* CONFIG_ALTIVEC */
1696 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1699 union kvmppc_one_reg val;
1702 size = one_reg_size(reg->id);
1703 if (size > sizeof(val))
1706 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1710 #ifdef CONFIG_ALTIVEC
1711 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1712 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1716 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1718 case KVM_REG_PPC_VSCR:
1719 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1723 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1725 case KVM_REG_PPC_VRSAVE:
1726 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1728 #endif /* CONFIG_ALTIVEC */
1738 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1744 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1747 union kvmppc_one_reg val;
1750 size = one_reg_size(reg->id);
1751 if (size > sizeof(val))
1754 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1757 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1761 #ifdef CONFIG_ALTIVEC
1762 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1763 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1767 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1769 case KVM_REG_PPC_VSCR:
1770 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1774 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1776 case KVM_REG_PPC_VRSAVE:
1777 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1781 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1783 #endif /* CONFIG_ALTIVEC */
1793 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1799 if (vcpu->mmio_needed) {
1800 vcpu->mmio_needed = 0;
1801 if (!vcpu->mmio_is_write)
1802 kvmppc_complete_mmio_load(vcpu, run);
1804 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1805 vcpu->arch.mmio_vsx_copy_nums--;
1806 vcpu->arch.mmio_vsx_offset++;
1809 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1810 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1811 if (r == RESUME_HOST) {
1812 vcpu->mmio_needed = 1;
1817 #ifdef CONFIG_ALTIVEC
1818 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1819 vcpu->arch.mmio_vmx_copy_nums--;
1820 vcpu->arch.mmio_vmx_offset++;
1823 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1824 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1825 if (r == RESUME_HOST) {
1826 vcpu->mmio_needed = 1;
1831 } else if (vcpu->arch.osi_needed) {
1832 u64 *gprs = run->osi.gprs;
1835 for (i = 0; i < 32; i++)
1836 kvmppc_set_gpr(vcpu, i, gprs[i]);
1837 vcpu->arch.osi_needed = 0;
1838 } else if (vcpu->arch.hcall_needed) {
1841 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1842 for (i = 0; i < 9; ++i)
1843 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1844 vcpu->arch.hcall_needed = 0;
1846 } else if (vcpu->arch.epr_needed) {
1847 kvmppc_set_epr(vcpu, run->epr.epr);
1848 vcpu->arch.epr_needed = 0;
1852 kvm_sigset_activate(vcpu);
1854 if (run->immediate_exit)
1857 r = kvmppc_vcpu_run(run, vcpu);
1859 kvm_sigset_deactivate(vcpu);
1861 #ifdef CONFIG_ALTIVEC
1868 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1870 if (irq->irq == KVM_INTERRUPT_UNSET) {
1871 kvmppc_core_dequeue_external(vcpu);
1875 kvmppc_core_queue_external(vcpu, irq);
1877 kvm_vcpu_kick(vcpu);
1882 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1883 struct kvm_enable_cap *cap)
1891 case KVM_CAP_PPC_OSI:
1893 vcpu->arch.osi_enabled = true;
1895 case KVM_CAP_PPC_PAPR:
1897 vcpu->arch.papr_enabled = true;
1899 case KVM_CAP_PPC_EPR:
1902 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1904 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1907 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1909 vcpu->arch.watchdog_enabled = true;
1912 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1913 case KVM_CAP_SW_TLB: {
1914 struct kvm_config_tlb cfg;
1915 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1918 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1921 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1925 #ifdef CONFIG_KVM_MPIC
1926 case KVM_CAP_IRQ_MPIC: {
1928 struct kvm_device *dev;
1931 f = fdget(cap->args[0]);
1936 dev = kvm_device_from_filp(f.file);
1938 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1944 #ifdef CONFIG_KVM_XICS
1945 case KVM_CAP_IRQ_XICS: {
1947 struct kvm_device *dev;
1950 f = fdget(cap->args[0]);
1955 dev = kvm_device_from_filp(f.file);
1958 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1960 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1966 #endif /* CONFIG_KVM_XICS */
1967 #ifdef CONFIG_KVM_XIVE
1968 case KVM_CAP_PPC_IRQ_XIVE: {
1970 struct kvm_device *dev;
1973 f = fdget(cap->args[0]);
1978 if (!xive_enabled())
1982 dev = kvm_device_from_filp(f.file);
1984 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
1990 #endif /* CONFIG_KVM_XIVE */
1991 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1992 case KVM_CAP_PPC_FWNMI:
1994 if (!is_kvmppc_hv_enabled(vcpu->kvm))
1997 vcpu->kvm->arch.fwnmi_enabled = true;
1999 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2006 r = kvmppc_sanity_check(vcpu);
2011 bool kvm_arch_intc_initialized(struct kvm *kvm)
2013 #ifdef CONFIG_KVM_MPIC
2017 #ifdef CONFIG_KVM_XICS
2018 if (kvm->arch.xics || kvm->arch.xive)
2024 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2025 struct kvm_mp_state *mp_state)
2030 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2031 struct kvm_mp_state *mp_state)
2036 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2037 unsigned int ioctl, unsigned long arg)
2039 struct kvm_vcpu *vcpu = filp->private_data;
2040 void __user *argp = (void __user *)arg;
2042 if (ioctl == KVM_INTERRUPT) {
2043 struct kvm_interrupt irq;
2044 if (copy_from_user(&irq, argp, sizeof(irq)))
2046 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2048 return -ENOIOCTLCMD;
2051 long kvm_arch_vcpu_ioctl(struct file *filp,
2052 unsigned int ioctl, unsigned long arg)
2054 struct kvm_vcpu *vcpu = filp->private_data;
2055 void __user *argp = (void __user *)arg;
2059 case KVM_ENABLE_CAP:
2061 struct kvm_enable_cap cap;
2064 if (copy_from_user(&cap, argp, sizeof(cap)))
2066 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2071 case KVM_SET_ONE_REG:
2072 case KVM_GET_ONE_REG:
2074 struct kvm_one_reg reg;
2076 if (copy_from_user(&reg, argp, sizeof(reg)))
2078 if (ioctl == KVM_SET_ONE_REG)
2079 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2081 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2085 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2086 case KVM_DIRTY_TLB: {
2087 struct kvm_dirty_tlb dirty;
2090 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2092 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2105 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2107 return VM_FAULT_SIGBUS;
2110 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2112 u32 inst_nop = 0x60000000;
2113 #ifdef CONFIG_KVM_BOOKE_HV
2114 u32 inst_sc1 = 0x44000022;
2115 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2116 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2117 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2118 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2120 u32 inst_lis = 0x3c000000;
2121 u32 inst_ori = 0x60000000;
2122 u32 inst_sc = 0x44000002;
2123 u32 inst_imm_mask = 0xffff;
2126 * The hypercall to get into KVM from within guest context is as
2129 * lis r0, KVM_SC_MAGIC_R0@h
2130 * ori r0, r0, KVM_SC_MAGIC_R0@l
2134 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2135 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2136 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2137 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2140 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
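/*
 * The guest is expected to copy these four instruction words (typically
 * exposed to it via the device tree) into its hypercall stub; sc1 is used
 * on Book E HV hosts and the classic sc trap elsewhere.
 */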
2145 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2148 if (!irqchip_in_kernel(kvm))
2151 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2152 irq_event->irq, irq_event->level,
2158 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2159 struct kvm_enable_cap *cap)
2167 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2168 case KVM_CAP_PPC_ENABLE_HCALL: {
2169 unsigned long hcall = cap->args[0];
2172 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2175 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2178 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2180 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2184 case KVM_CAP_PPC_SMT: {
2185 unsigned long mode = cap->args[0];
2186 unsigned long flags = cap->args[1];
2189 if (kvm->arch.kvm_ops->set_smt_mode)
2190 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2194 case KVM_CAP_PPC_NESTED_HV:
2196 if (!is_kvmppc_hv_enabled(kvm) ||
2197 !kvm->arch.kvm_ops->enable_nested)
2199 r = kvm->arch.kvm_ops->enable_nested(kvm);
2210 #ifdef CONFIG_PPC_BOOK3S_64
2212 * These functions check whether the underlying hardware is safe
2213 * against attacks based on observing the effects of speculatively
2214 * executed instructions, and whether it supplies instructions for
2215 * use in workarounds. The information comes from firmware, either
2216 * via the device tree on powernv platforms or from an hcall on
2217 * pseries platforms.
2219 #ifdef CONFIG_PPC_PSERIES
2220 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2222 struct h_cpu_char_result c;
2225 if (!machine_is(pseries))
2228 rc = plpar_get_cpu_characteristics(&c);
2229 if (rc == H_SUCCESS) {
2230 cp->character = c.character;
2231 cp->behaviour = c.behaviour;
2232 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2233 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2234 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2235 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2236 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2237 KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2238 KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2239 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2240 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2241 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2242 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2243 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2244 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2249 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2255 static inline bool have_fw_feat(struct device_node *fw_features,
2256 const char *state, const char *name)
2258 struct device_node *np;
2261 np = of_get_child_by_name(fw_features, name);
2263 r = of_property_read_bool(np, state);
2269 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2271 struct device_node *np, *fw_features;
2274 memset(cp, 0, sizeof(*cp));
2275 r = pseries_get_cpu_char(cp);
2279 np = of_find_node_by_name(NULL, "ibm,opal");
2281 fw_features = of_get_child_by_name(np, "fw-features");
2285 if (have_fw_feat(fw_features, "enabled",
2286 "inst-spec-barrier-ori31,31,0"))
2287 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2288 if (have_fw_feat(fw_features, "enabled",
2289 "fw-bcctrl-serialized"))
2290 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2291 if (have_fw_feat(fw_features, "enabled",
2292 "inst-l1d-flush-ori30,30,0"))
2293 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2294 if (have_fw_feat(fw_features, "enabled",
2295 "inst-l1d-flush-trig2"))
2296 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2297 if (have_fw_feat(fw_features, "enabled",
2298 "fw-l1d-thread-split"))
2299 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2300 if (have_fw_feat(fw_features, "enabled",
2301 "fw-count-cache-disabled"))
2302 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2303 if (have_fw_feat(fw_features, "enabled",
2304 "fw-count-cache-flush-bcctr2,0,0"))
2305 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2306 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2307 KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2308 KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2309 KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2310 KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2311 KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2312 KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2314 if (have_fw_feat(fw_features, "enabled",
2315 "speculation-policy-favor-security"))
2316 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2317 if (!have_fw_feat(fw_features, "disabled",
2318 "needs-l1d-flush-msr-pr-0-to-1"))
2319 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2320 if (!have_fw_feat(fw_features, "disabled",
2321 "needs-spec-barrier-for-bound-checks"))
2322 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2323 if (have_fw_feat(fw_features, "enabled",
2324 "needs-count-cache-flush-on-context-switch"))
2325 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2326 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2327 KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2328 KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2329 KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2331 of_node_put(fw_features);
2338 long kvm_arch_vm_ioctl(struct file *filp,
2339 unsigned int ioctl, unsigned long arg)
2341 struct kvm *kvm __maybe_unused = filp->private_data;
2342 void __user *argp = (void __user *)arg;
2346 case KVM_PPC_GET_PVINFO: {
2347 struct kvm_ppc_pvinfo pvinfo;
2348 memset(&pvinfo, 0, sizeof(pvinfo));
2349 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2350 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2357 #ifdef CONFIG_SPAPR_TCE_IOMMU
2358 case KVM_CREATE_SPAPR_TCE_64: {
2359 struct kvm_create_spapr_tce_64 create_tce_64;
2362 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2364 if (create_tce_64.flags) {
2368 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2371 case KVM_CREATE_SPAPR_TCE: {
2372 struct kvm_create_spapr_tce create_tce;
2373 struct kvm_create_spapr_tce_64 create_tce_64;
2376 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2379 create_tce_64.liobn = create_tce.liobn;
2380 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2381 create_tce_64.offset = 0;
2382 create_tce_64.size = create_tce.window_size >>
2383 IOMMU_PAGE_SHIFT_4K;
2384 create_tce_64.flags = 0;
2385 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2389 #ifdef CONFIG_PPC_BOOK3S_64
2390 case KVM_PPC_GET_SMMU_INFO: {
2391 struct kvm_ppc_smmu_info info;
2392 struct kvm *kvm = filp->private_data;
2394 memset(&info, 0, sizeof(info));
2395 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2396 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2400 case KVM_PPC_RTAS_DEFINE_TOKEN: {
2401 struct kvm *kvm = filp->private_data;
2403 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2406 case KVM_PPC_CONFIGURE_V3_MMU: {
2407 struct kvm *kvm = filp->private_data;
2408 struct kvm_ppc_mmuv3_cfg cfg;
2411 if (!kvm->arch.kvm_ops->configure_mmu)
2414 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2416 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2419 case KVM_PPC_GET_RMMU_INFO: {
2420 struct kvm *kvm = filp->private_data;
2421 struct kvm_ppc_rmmu_info info;
2424 if (!kvm->arch.kvm_ops->get_rmmu_info)
2426 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2427 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2431 case KVM_PPC_GET_CPU_CHAR: {
2432 struct kvm_ppc_cpu_char cpuchar;
2434 r = kvmppc_get_cpu_char(&cpuchar);
2435 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2439 case KVM_PPC_SVM_OFF: {
2440 struct kvm *kvm = filp->private_data;
2443 if (!kvm->arch.kvm_ops->svm_off)
2446 r = kvm->arch.kvm_ops->svm_off(kvm);
2450 struct kvm *kvm = filp->private_data;
2451 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2453 #else /* CONFIG_PPC_BOOK3S_64 */
2462 static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
2463 static unsigned long nr_lpids;
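/*
 * Minimal bitmap allocator for logical partition IDs (LPIDs):
 * kvmppc_init_lpid() records how many LPIDs are usable,
 * kvmppc_alloc_lpid() hands out a free one, and kvmppc_claim_lpid() /
 * kvmppc_free_lpid() mark an ID as reserved or available again.
 */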
2465 long kvmppc_alloc_lpid(void)
2470 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
2471 if (lpid >= nr_lpids) {
2472 pr_err("%s: No LPIDs free\n", __func__);
2475 } while (test_and_set_bit(lpid, lpid_inuse));
2479 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2481 void kvmppc_claim_lpid(long lpid)
2483 set_bit(lpid, lpid_inuse);
2485 EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
2487 void kvmppc_free_lpid(long lpid)
2489 clear_bit(lpid, lpid_inuse);
2491 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2493 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2495 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
2496 memset(lpid_inuse, 0, sizeof(lpid_inuse));
2498 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2500 int kvm_arch_init(void *opaque)
2505 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);