// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
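
/*
 * kvmppc_hv_ops and kvmppc_pr_ops are filled in by the HV and PR
 * implementation modules when they load; kvm_arch_init_vm() below picks
 * one of them per VM based on the type argument to KVM_CREATE_VM.
 */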

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
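
/*
 * Book3S guests can change endianness at runtime; the shared (magic)
 * page is kept in the guest's current byte order, so when the guest's
 * interrupt endianness flips, every field must be byteswapped in place.
 */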
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
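
/*
 * Handle the KVM/ePAPR paravirtual hypercalls: the hypercall number
 * arrives in r11 and up to four parameters in r3-r6 (truncated to
 * 32 bits when MSR_SF is clear).  The status this handler returns is
 * placed in r3 by the exit path, and a second return value goes back
 * in r4.
 */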
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
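
/*
 * kvmppc_st()/kvmppc_ld() copy data between a guest effective address
 * and a host buffer: translate the address, honour the magic page
 * override, and report EMULATE_DO_MMIO when the target has no memory
 * backing so the access must be emulated as MMIO.
 */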
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else	if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
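
/*
 * Capability reporting: with no VM yet (kvm == NULL) we can only guess
 * from which modules are loaded; once a VM exists, hv_enabled reflects
 * the mode that VM was actually created with.
 */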
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
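
/*
 * MMIO completion helpers for VSX registers.  A VSX register is
 * addressed as two 64-bit doublewords or four 32-bit words; the
 * helpers below translate a copy offset into the element index for the
 * host's byte order.  For example, on a little-endian host doubleword
 * index 0 maps to element 1 (offset = 1 - index).
 */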
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */
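
/*
 * Altivec (VMX) equivalents of the VSX helpers: compute the element
 * offset within a 16-byte vector register, reversing the element order
 * when guest and host byte order differ (kvmppc_need_byteswap()).
 */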
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}

static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */
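
/*
 * Single <-> double precision conversion helpers: the value is bounced
 * through fr0 with a float load and a double store (or vice versa) so
 * the FPU performs the format conversion; kernel FP use is enabled and
 * preemption disabled around the asm.
 */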
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
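
/*
 * Route the data returned by a completed MMIO load into the destination
 * encoded in vcpu->arch.io_gpr: a GPR, an FPR (after any single-precision
 * extension and sign extension), a QPR/FQPR pair on Book3S PR, a VSX or
 * VMX element, or a nested guest's GPR image.
 */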
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
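
/*
 * VSX store path: pull the element to be stored out of the FPR/VR
 * register image according to the copy type and offset, then feed it
 * to kvmppc_handle_store() one element per iteration.
 */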
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;

		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
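
/*
 * KVM_PPC_GET_PVINFO: hand userspace the four-instruction hypercall
 * sequence a guest should patch in.  Booke-HV uses a single "sc 1";
 * everything else loads KVM_SC_MAGIC_R0 into r0 and executes "sc".
 */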
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			break;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			break;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			break;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
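
/*
 * Simple LPID allocator, shared by the VM types that need logical
 * partition IDs: a bitmap of in-use LPIDs manipulated with atomic
 * bitops.  kvmppc_claim_lpid() reserves a specific ID chosen by the
 * caller rather than the first free one.
 */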
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);