/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

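/*
 * Background note (editorial): "split real mode" is the MSR_IR=0/MSR_DR=1
 * state that some 32-bit guests briefly run in, where instruction fetches
 * are untranslated while data accesses go through the MMU. Since PR KVM
 * always runs the guest with translation on, the fixup below aliases the
 * guest PC into the SPLIT_HACK_OFFS window so instruction fetches can be
 * mapped, and kvmppc_unfixup_split_real() undoes it; this is only a rough
 * sketch of the trick, the details live in the shadow MMU code.
 */
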
static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External provider (FP/VEC/VSX) bits the guest currently owns */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * In guest privileged state we want all TM transactions to fail,
	 * so clear the MSR TM bit so that every tbegin. traps into KVM.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

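/*
 * Worked example (illustrative): suppose the guest runs in 64-bit
 * supervisor mode with FP enabled and owns the FPU, i.e.
 * guest_msr = MSR_SF | MSR_FP and guest_owned_ext contains MSR_FP. Then
 *   smsr = MSR_SF                                 (kept from the guest)
 *        | MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_PR|MSR_EE  (forced by the host)
 *        | MSR_FP                                 (guest-owned provider)
 *        | MSR_ISF|MSR_HV                         (64-bit only)
 * so the guest physically executes in problem state with translation on,
 * whatever privilege level it thinks it is running at.
 */
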
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, the MSR[TS] bits can be changed in the
	 * guest without notifying the host: they are modified by
	 * unprivileged instructions like "tbegin"/"tend"/"tresume"/
	 * "tsuspend" in a PR KVM guest.
	 *
	 * It is necessary to sync here to calculate a correct shadow_msr.
	 *
	 * A privileged guest's tbegin always fails at present, so we only
	 * need to take care of problem-state guests.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

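/*
 * Editorial note: PR KVM has no hardware PURR/SPURR partitioning, so the
 * guest's PURR and SPURR above are approximated by the timebase ticks
 * that elapsed between guest entry (entry_tb, snapshotted in
 * kvmppc_copy_to_svcpu()) and this exit path; VTB and IC are accumulated
 * the same way on CPUs that have them.
 */
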
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/* Load up math bits which are enabled in the guest MSR (kvmppc_get_msr())
 * but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	unsigned int exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

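/*
 * Example (illustrative): if the guest MSR has FP and VEC set but
 * guest_owned_ext only contains MSR_FP, then ext_diff == MSR_VEC and we
 * synthesize an Altivec-unavailable preload via kvmppc_handle_ext(),
 * which loads the vector state and marks it guest-owned again.
 */
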
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

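/*
 * Worked example for do_kvm_unmap_hva() (illustrative numbers): for a
 * memslot with userspace_addr = 0x70000000, base_gfn = 0x100 and
 * npages = 0x10, an unmap of [0x70002000, 0x70004000) clips to the slot
 * and hva_to_gfn_memslot() yields gfn = 0x102, gfn_end = 0x104, so each
 * vcpu's shadow PTEs covering guest physical [0x102000, 0x104000) get
 * flushed.
 */
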
/*****************************************/

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target guest MSR to TS=10 && PR=0,
	 * since we always fail transactions in guest privileged
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically the 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
		break;
	}
}

/* Book3s_32 CPUs always have 32 byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */

static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

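/*
 * Encoding note (illustrative): dcbz is primary opcode 31 with extended
 * opcode 1014, e.g. 0x7c0007ec for "dcbz r0,r0". The mask 0xff0007ff used
 * above selects exactly the primary/extended opcode fields, and clearing
 * bit 0x8 turns the instruction into an unallocated encoding (...7e4) that
 * raises a program interrupt; the program-interrupt path then recognizes
 * the patched pattern and emulates it as a 32-byte dcbz.
 */
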
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else
			kvmppc_core_queue_inst_storage(vcpu, flags);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}

		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			r = RESUME_HOST;
			goto out;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

out:
	return r;
}

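/*
 * Summary of the xlate() result handling above (editorial):
 *   0        -> a guest translation exists; map it into the shadow MMU,
 *               or treat it as MMIO if the GPA is not backed by a memslot.
 *   -ENOENT  -> no guest HPTE: reflect a storage interrupt (DSISR_NOHPTE).
 *   -EPERM   -> the guest HPTE forbids the access: DSISR_PROTFAULT.
 *   -EINVAL  -> no guest SLB entry: reflect a segment interrupt (vec+0x80).
 */
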
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request*/
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

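/*
 * Example (illustrative): a guest VSX-unavailable exit arrives with
 * msr == MSR_VSX. The request is first widened to MSR_FP|MSR_VEC|MSR_VSX
 * (VSX instructions can touch both register files), the bits we already
 * own are dropped, and only the remainder is loaded, so a guest that
 * already owned the FPU only pays for loading the vector half.
 */
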
/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM in privileged state, an mfspr of a TM
	 * SPR can trigger a TM facility-unavailable interrupt. In that case
	 * the emulation is handled by kvmppc_emulate_fac(), which eventually
	 * invokes kvmppc_emulate_mfspr(). But note the mfspr target RT can
	 * be a non-volatile register, so we need to restore those NV regs
	 * to reflect the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

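/*
 * Note (editorial): the "fac" value used here and in
 * kvmppc_trigger_fac_interrupt() is the FSCR bit number of the facility
 * (e.g. FSCR_TAR_LG == 8), which is also the value architecturally
 * reported in the FSCR Interrupt Cause field occupying the top byte of
 * the register, hence the "fac << 56" when injecting FAC_UNAVAIL above.
 */
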
void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			kvmppc_core_queue_inst_storage(vcpu,
						shadow_srr1 & 0x58000000);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		*val = get_reg_val(id,
				vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
			set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shared_page;

	return vcpu;

free_shared_page:
	free_page((unsigned long)vcpu->arch.shared);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

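/*
 * Resulting layout (illustrative): for a guest with
 * BOOK3S_HFLAG_MULTI_PGSIZE, the table reported to userspace is
 *   sps[0]: 4k base pages, sps[1]: 64k pages, sps[2]: 16M pages,
 * with KVM_PPC_1T_SEGMENTS set in info->flags; otherwise the 64k entry
 * is skipped and the 16M entry lands in sps[1].
 */
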
static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;

	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode. It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif