/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/xive-regs.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
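/*
 * For example, VCPU_GPRS_TM(13) expands to
 * (13 * ULONG_SIZE) + VCPU_GPR_TM, the byte offset of the
 * checkpointed image of r13 within the vcpu struct.
 */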
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/* Stack frame offsets for kvmppc_hv_entry */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
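/*
 * Each slot is an offset back from the top of the kvmppc_hv_entry
 * stack frame, whose total size is SFS.
 */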
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std r0, PPC_LR_STKOFF(r1)
	std r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd r0,1		/* clear RI in MSR */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	ld r3,PACA_SPRG_VDSO(r13)
	mtspr SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz r4, LPPACA_PMCINUSE(r3)
	beq 23f			/* skip if not */
	ld r3, HSTATE_MMCR0(r13)
	andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, HSTATE_PMC1(r13)
	lwz r4, HSTATE_PMC2(r13)
	lwz r5, HSTATE_PMC3(r13)
	lwz r6, HSTATE_PMC4(r13)
	lwz r8, HSTATE_PMC5(r13)
	lwz r9, HSTATE_PMC6(r13)
	ld r3, HSTATE_MMCR0(r13)
	ld r4, HSTATE_MMCR1(r13)
	ld r5, HSTATE_MMCRA(r13)
	ld r6, HSTATE_SIAR(r13)
	ld r7, HSTATE_SDAR(r13)
	ld r8, HSTATE_MMCR2(r13)
	ld r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	stb r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code there.
	 */
	ld r8, 112+PPC_LR_STKOFF(r1)
	ld r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	andi. r0, r0, MSR_IR		/* in real mode? */

	/* RFI into the highmem handler */
	mtmsrd r6, 1			/* Clear RI in MSR */

	/* Virtual-mode return */

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld r5, HSTATE_KVM_VCORE(r13)
65:	lbz r0, VCORE_IN_GUEST(r5)

	/* set our bit in napping_threads */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r7, HSTATE_PTID(r13)
	addi r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	lwz r7, VCORE_ENTRY_EXIT(r5)
	bge kvm_novcpu_exit	/* another thread already exiting */
	li r3, NAPPING_NOVCPU
	stb r3, HSTATE_NAPPING(r13)

	li r3, 0		/* Don't wake on privileged (OS) doorbell */
/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 * to NAPPING_NOVCPU
 */
kvm_novcpu_wakeup:
	ld r1, HSTATE_HOST_R1(r13)
	ld r5, HSTATE_KVM_VCORE(r13)
	stb r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 */
	ld r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz r7, HSTATE_PTID(r13)
	addi r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld r4, HSTATE_KVM_VCPU(r13)
	beq kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld r4, HSTATE_KVM_VCPU(r13)
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time

	stw r12, STACK_SLOT_TRAP(r1)
	bl kvmhv_commence_exit
	lwz r12, STACK_SLOT_TRAP(r1)
	b kvmhv_switch_to_host
/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	li r0,KVM_HWTHREAD_IN_KVM
	stb r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz r0,HSTATE_NAPPING(r13)
	cmpwi r0,NAPPING_CEDE
	cmpwi r0,NAPPING_NOVCPU
	beq kvm_novcpu_wakeup

	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl kvmppc_check_wake_reason

	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	/* get vcore pointer, NULL if we have nothing to run */
	ld r5,HSTATE_KVM_VCORE(r13)

	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld r6, PACA_DSCR_DEFAULT(r13)
	std r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)

	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld r6, HSTATE_SPLIT_MODE(r13)
	ld r0, KVM_SPLIT_RPR(r6)
	ld r0, KVM_SPLIT_PMMAR(r6)
	ld r0, KVM_SPLIT_LDBAR(r6)

	/* Order load of vcpu after load of vcore */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std r0, HSTATE_KVM_VCORE(r13)
	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	li r3,0			/* NULL argument */
	bl hmi_exception_realmode

	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz r3, HSTATE_HWTHREAD_REQ(r13)
	li r0, KVM_HWTHREAD_IN_KERNEL
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop. The value we
	 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
	 * requires SRR1 in r12.
	 */
	rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	ld r5, HSTATE_KVM_VCORE(r13)
	ld r3, HSTATE_SPLIT_MODE(r13)
	lbz r0, KVM_SPLIT_DO_NAP(r3)
	b kvm_secondary_got_guest

54:	li r0, KVM_HWTHREAD_IN_KVM
	stb r0, HSTATE_HWTHREAD_STATE(r13)
/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest. Hence the HMI keeps waking up
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	li r3, 0		/* NULL argument */
	bl hmi_exception_realmode

	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld r3, HSTATE_SPLIT_MODE(r13)
	lhz r4, PACAPACAINDEX(r13)
	clrldi r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz r0, KVM_SPLIT_DO_NAP(r3)

	li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:
	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	std r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std r1, HSTATE_HOST_R1(r13)

	li r6, KVM_GUEST_MODE_HOST_HV
	stb r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing

	/* Use cr7 as an indication of radix mode */
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz r0, KVM_RADIX(r9)

	/* Clear out SLB if hash */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	lbz r6, HSTATE_PTID(r13)
	addi r8, r5, VCORE_ENTRY_EXIT
	cmpwi r3, 0x100		/* any threads starting to exit? */
	bge secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	li r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr SPRN_SDR1,r6	/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* See if we need to flush the TLB */
	lhz r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi r7,r6,64-6	/* extract bit number (6 bits) */
	srdi r6,r6,6		/* doubleword number */
	sldi r6,r6,3		/* address offset */
	addi r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
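	/*
	 * In C terms, the address computed above is
	 *	&kvm->arch.need_tlb_flush[cpu >> 6]
	 * with our flag at bit (cpu & 63) -- the usual test_bit()
	 * decomposition done by hand, reused at label 23 below to
	 * clear the bit atomically once the TLB has been flushed.
	 */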
	/* Flush the TLB of any entries for this LPID */
	lwz r0,KVM_TLB_SETS(r9)
	li r7,0x800		/* IS field = 0b10 */
	li r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel r7		/* On P9, rs=0, RIC=0, PRS=0, R=0 */
29:	PPC_TLBIEL(7,0,2,1,1)	/* for radix, RIC=2, PRS=1, R=1 */
23:	ldarx r7,0,r6		/* clear the bit after TLB flushed */

	/* Add timebase offset onto timebase */
22:	ld r8,VCORE_TB_OFFSET(r5)
	mftb r6			/* current host timebase */
	mtspr SPRN_TBU40,r8	/* update upper 40 bits */
	mftb r7			/* check if lower 24 bits overflowed */
	addis r8,r8,0x100	/* if so, increment upper 40 bits */
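	/*
	 * Writing TBU40 sets only the upper 40 bits of the timebase, so if
	 * the low 24 bits happened to wrap between the mftb and the mtspr,
	 * the carry into the upper bits would be lost; the addis of 0x100
	 * adds 1 << 24, i.e. one unit of the upper-40-bit field, to
	 * compensate before writing again.
	 */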
	/* Load guest PCR value to select appropriate compat mode */
37:	ld r7, VCORE_PCR(r5)

	/* DPDES and VTB are shared between threads */
	ld r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl kvmppc_subcore_enter_guest
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r4, HSTATE_KVM_VCPU(r13)
	stb r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq kvmppc_primary_no_guest

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz r5,VCPU_SLB_MAX(r4)
1:	ld r8,VCPU_SLB_E(r6)
	addi r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	li r6, LPPACA_YIELDCOUNT
	stb r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std r5,HSTATE_PURR(r13)
	std r6,HSTATE_SPURR(r13)

	/* Save host values of some registers */
	std r5, STACK_SLOT_TID(r1)
	std r6, STACK_SLOT_PSSCR(r1)
	std r7, STACK_SLOT_PID(r1)
	std r8, STACK_SLOT_IAMR(r1)
	std r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std r5, STACK_SLOT_CIABR(r1)
	std r6, STACK_SLOT_DAWR(r1)
	std r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr SPRN_MMCR0, r3	/* freeze all counters, disable ints */
	andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz r6, VCPU_PMC + 8(r4)
	lwz r7, VCPU_PMC + 12(r4)
	lwz r8, VCPU_PMC + 16(r4)
	lwz r9, VCPU_PMC + 20(r4)
	ld r5, VCPU_MMCR + 8(r4)
	ld r6, VCPU_MMCR + 16(r4)
	ld r5, VCPU_MMCR + 24(r4)
BEGIN_FTR_SECTION_NESTED(96)
	lwz r7, VCPU_PMC + 24(r4)
	lwz r8, VCPU_PMC + 28(r4)
	ld r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Load up FP, VMX and VSX registers */
	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	lwz r6, VCPU_PSPB(r4)
	ld r6, VCPU_DAWRX(r4)
	ld r7, VCPU_CIABR(r4)
	ld r8, VCPU_EBBHR(r4)
	ld r5, VCPU_EBBRR(r4)
	ld r6, VCPU_BESCR(r4)
	lwz r7, VCPU_GUEST_PID(r4)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/* POWER8-only registers */
	ld r5, VCPU_TCSCR(r4)
	ld r7, VCPU_CSIGR(r4)

	/* POWER9-only registers */
	ld r6, VCPU_PSSCR(r4)
	oris r6, r6, PSSCR_EC@h	/* make the stop instruction trap to the hypervisor */
	ld r7, VCPU_HFSCR(r4)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/* Set the decrementer to the guest decrementer. */
	ld r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld r5,HSTATE_KVM_VCORE(r13)
	ld r6,VCORE_TB_OFFSET(r5)

	ld r5, VCPU_SPRG0(r4)
	ld r6, VCPU_SPRG1(r4)
	ld r7, VCPU_SPRG2(r4)
	ld r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r6, HSTATE_PTID(r13)
	lbz r0, VCORE_IN_GUEST(r5)
20:	lwz r3, VCORE_ENTRY_EXIT(r5)
	lbz r0, VCORE_IN_GUEST(r5)

	/* Check if HDEC expires soon */
	cmpdi r3, 512		/* 1 microsecond */

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
	ld r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz r11, VCPU_XIVE_CAM_WORD(r4)
	li r9, TM_QW1_OS + TM_WORD2
	stw r9, VCPU_XIVE_PUSHED(r4)
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld r11, VCPU_MSR(r4)
	ld r6, VCPU_SRR0(r4)
	ld r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl r11, r11, 63 - MSR_HV_LG, 1
	rotldi r11, r11, 1 + MSR_HV_LG
	ori r11, r11, MSR_ME
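	/*
	 * The rldicl/rotldi pair is a branch-free way to clear one bit:
	 * rotate MSR_HV up to the MSB, mask it off, rotate back.  In C:
	 *	r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
	 */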
	/* Check if we can deliver an external or decrementer interrupt now */
	ld r0, VCPU_PENDING_EXC(r4)
	rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi. r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
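	/*
	 * LPCR[MER] (mediated external request) makes the hardware deliver
	 * an external interrupt to the guest as soon as the guest sets
	 * MSR_EE, so a level-triggered external can simply be left pending
	 * here instead of being synthesized immediately.
	 */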
	li r0, BOOK3S_INTERRUPT_EXTERNAL

	/* On POWER9 check whether the guest has large decrementer enabled */
	andis. r8, r8, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	li r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	bl kvmppc_msr_interrupt
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* On POWER9, check for pending doorbell requests */
	lbz r0, VCPU_DBELL_REQ(r4)
	beq fast_guest_return
	ld r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	mtspr SPRN_DPDES, r0
	std r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	/* Clear the pending doorbell request */
	stb r0, VCPU_DBELL_REQ(r4)
	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
fast_guest_return:
	stb r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr SPRN_HSRR0,r10
	mtspr SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li r9, KVM_GUEST_MODE_GUEST_HV
	stb r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi r3, r4, VCPU_TB_GUEST
	bl kvmhv_accumulate_time

	ld r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld r1, VCPU_GPR(R1)(r4)
	ld r2, VCPU_GPR(R2)(r4)
	ld r3, VCPU_GPR(R3)(r4)
	ld r5, VCPU_GPR(R5)(r4)
	ld r6, VCPU_GPR(R6)(r4)
	ld r7, VCPU_GPR(R7)(r4)
	ld r8, VCPU_GPR(R8)(r4)
	ld r9, VCPU_GPR(R9)(r4)
	ld r10, VCPU_GPR(R10)(r4)
	ld r11, VCPU_GPR(R11)(r4)
	ld r12, VCPU_GPR(R12)(r4)
	ld r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld r0, VCPU_GPR(R0)(r4)
	ld r4, VCPU_GPR(R4)(r4)

	stw r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
11:	b kvmhv_switch_to_host

	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std r9, HSTATE_SCRATCH2(r13)
	lbz r9, HSTATE_IN_GUEST(r13)
	cmpwi r9, KVM_GUEST_MODE_HOST_HV
	beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi r9, KVM_GUEST_MODE_GUEST
	ld r9, HSTATE_SCRATCH2(r13)
	beq kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li r9, KVM_GUEST_MODE_HOST_HV
	stb r9, HSTATE_IN_GUEST(r13)

	ld r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std r0, VCPU_GPR(R0)(r9)
	std r1, VCPU_GPR(R1)(r9)
	std r2, VCPU_GPR(R2)(r9)
	std r3, VCPU_GPR(R3)(r9)
	std r4, VCPU_GPR(R4)(r9)
	std r5, VCPU_GPR(R5)(r9)
	std r6, VCPU_GPR(R6)(r9)
	std r7, VCPU_GPR(R7)(r9)
	std r8, VCPU_GPR(R8)(r9)
	ld r0, HSTATE_SCRATCH2(r13)
	std r0, VCPU_GPR(R9)(r9)
	std r10, VCPU_GPR(R10)(r9)
	std r11, VCPU_GPR(R11)(r9)
	ld r3, HSTATE_SCRATCH0(r13)
	std r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	ld r3, HSTATE_CFAR(r13)
	std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld r4, HSTATE_PPR(r13)
	std r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld r1, HSTATE_HOST_R1(r13)

	mfspr r10, SPRN_SRR0
	mfspr r11, SPRN_SRR1
	std r10, VCPU_SRR0(r9)
	std r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi. r0, r12, 2	/* need to read HSRR0/1? */
	mfspr r10, SPRN_HSRR0
	mfspr r11, SPRN_HSRR1
1:	std r10, VCPU_PC(r9)
	std r11, VCPU_MSR(r9)

	std r3, VCPU_GPR(R13)(r9)
	stw r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r9, VCPU_TB_RMINTR
	bl kvmhv_accumulate_time
	ld r5, VCPU_GPR(R5)(r9)
	ld r6, VCPU_GPR(R6)(r9)
	ld r7, VCPU_GPR(R7)(r9)
	ld r8, VCPU_GPR(R8)(r9)

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li r3,KVM_INST_FETCH_FAILED
	stw r3,VCPU_LAST_INST(r9)
	cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld r3, HSTATE_SCRATCH1(r13)
	std r3, VCPU_CTR(r9)
	std r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE

	/* See if this is a leftover HDEC interrupt */
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
	beq hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
	lbz r0, HSTATE_HOST_IPI(r13)

	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	mfspr r3, SPRN_HFSCR
	std r3, VCPU_HFSCR(r9)

	/* External interrupt ? */
	cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+ guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we fall through to the return-to-guest cases below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */
	/* Return code = 2 */
	li r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw r12, VCPU_TRAP(r9)

1:	/* Return code <= 1 */

	/* Return code <= 0 */
4:	ld r5, HSTATE_KVM_VCORE(r13)
	lwz r0, VCORE_ENTRY_EXIT(r5)
	blt deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lwz r0, VCPU_XIVE_PUSHED(r9)
	li r7, TM_SPC_PULL_OS_CTX
	andi. r0, r0, MSR_IR	/* in real mode? */
	ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
2:	ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
3:	std r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	stw r10, VCPU_XIVE_PUSHED(r9)
	stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
#endif /* CONFIG_KVM_XICS */

	/* Save more register state */
	std r6, VCPU_DAR(r9)
	stw r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	std r6, VCPU_FAULT_DAR(r9)
	stw r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq machine_check_realmode

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r9, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time

	/* Increment exit count, poke other threads to exit */
	bl kvmhv_commence_exit
	ld r9, HSTATE_KVM_VCPU(r13)
	lwz r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw r0, VCPU_CPU(r9)
	stw r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lbz r0, KVM_RADIX(r5)
	bne 3f			/* for radix, save 0 entries */
	lwz r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis. r0,r8,SLB_ESID_V@h
	add r8,r8,r6		/* put index in */
	std r8,VCPU_SLB_E(r7)
	std r3,VCPU_SLB_V(r7)
	addi r7,r7,VCPU_SLB_SIZE
3:	stw r5,VCPU_SLB_MAX(r9)

	/* Save the guest PURR/SPURR */
	ld r8,VCPU_SPURR(r9)
	std r5,VCPU_PURR(r9)
	std r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld r3,HSTATE_PURR(r13)
	ld r4,HSTATE_SPURR(r13)

	ld r3, HSTATE_KVM_VCORE(r13)
	/* On P9, if the guest has large decr enabled, don't sign extend */
	ld r4, VCORE_LPCR(r3)
	andis. r4, r4, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/* r5 is a guest timebase value here, convert to host TB */
	ld r4,VCORE_TB_OFFSET(r3)
	std r5,VCPU_DEC_EXPIRES(r9)

END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std r5, VCPU_IAMR(r9)
	stw r6, VCPU_PSPB(r9)
	std r7, VCPU_FSCR(r9)
	std r7, VCPU_TAR(r9)
	mfspr r8, SPRN_EBBHR
	std r8, VCPU_EBBHR(r9)
	mfspr r5, SPRN_EBBRR
	mfspr r6, SPRN_BESCR
	std r5, VCPU_EBBRR(r9)
	std r6, VCPU_BESCR(r9)
	stw r7, VCPU_GUEST_PID(r9)
	std r8, VCPU_WORT(r9)
	mfspr r5, SPRN_TCSCR
	mfspr r7, SPRN_CSIGR
	std r5, VCPU_TCSCR(r9)
	std r6, VCPU_ACOP(r9)
	std r7, VCPU_CSIGR(r9)
	std r8, VCPU_TACR(r9)
	mfspr r6, SPRN_PSSCR
	std r5, VCPU_TID(r9)
	rldicl r6, r6, 4, 50	/* r6 &= PSSCR_GUEST_VIS */
	std r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld r7, STACK_SLOT_HFSCR(r1)
	mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	mtspr SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and reset AMR and UAMOR before turning on the MMU */
	std r6,VCPU_UAMOR(r9)
	mtspr SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	ld r7, HSTATE_DSCR(r13)
	std r8, VCPU_DSCR(r9)

	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r9)
	std r15, VCPU_GPR(R15)(r9)
	std r16, VCPU_GPR(R16)(r9)
	std r17, VCPU_GPR(R17)(r9)
	std r18, VCPU_GPR(R18)(r9)
	std r19, VCPU_GPR(R19)(r9)
	std r20, VCPU_GPR(R20)(r9)
	std r21, VCPU_GPR(R21)(r9)
	std r22, VCPU_GPR(R22)(r9)
	std r23, VCPU_GPR(R23)(r9)
	std r24, VCPU_GPR(R24)(r9)
	std r25, VCPU_GPR(R25)(r9)
	std r26, VCPU_GPR(R26)(r9)
	std r27, VCPU_GPR(R27)(r9)
	std r28, VCPU_GPR(R28)(r9)
	std r29, VCPU_GPR(R29)(r9)
	std r30, VCPU_GPR(R30)(r9)
	std r31, VCPU_GPR(R31)(r9)

	mfspr r3, SPRN_SPRG0
	mfspr r4, SPRN_SPRG1
	mfspr r5, SPRN_SPRG2
	mfspr r6, SPRN_SPRG3
	std r3, VCPU_SPRG0(r9)
	std r4, VCPU_SPRG1(r9)
	std r5, VCPU_SPRG2(r9)
	std r6, VCPU_SPRG3(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* Increment yield count if they have a VPA */
	ld r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li r4, LPPACA_YIELDCOUNT
	stb r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li r3, -1		/* set all freeze bits */
	mfspr r10, SPRN_MMCR2
	mtspr SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr r4, SPRN_MMCR0	/* save MMCR0 */
	mtspr SPRN_MMCR0, r3	/* freeze all counters, disable ints */
	mfspr r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	mtspr SPRN_MMCRA, r7
	beq 21f			/* if no VPA, save PMU stuff anyway */
	lbz r7, LPPACA_PMCINUSE(r8)
	cmpwi r7, 0		/* did they ask for PMU stuff to be saved? */
	std r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr r5, SPRN_MMCR1
	std r4, VCPU_MMCR(r9)
	std r5, VCPU_MMCR + 8(r9)
	std r6, VCPU_MMCR + 16(r9)
	std r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std r7, VCPU_SIAR(r9)
	std r8, VCPU_SDAR(r9)
	stw r3, VCPU_PMC(r9)
	stw r4, VCPU_PMC + 4(r9)
	stw r5, VCPU_PMC + 8(r9)
	stw r6, VCPU_PMC + 12(r9)
	stw r7, VCPU_PMC + 16(r9)
	stw r8, VCPU_PMC + 20(r9)
	std r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr r6, SPRN_SPMC1
	mfspr r7, SPRN_SPMC2
	mfspr r8, SPRN_MMCRS
	stw r6, VCPU_PMC + 24(r9)
	stw r7, VCPU_PMC + 28(r9)
	std r8, VCPU_MMCR + 32(r9)
	mtspr SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Restore host values of some registers */
	ld r5, STACK_SLOT_CIABR(r1)
	ld r6, STACK_SLOT_DAWR(r1)
	ld r7, STACK_SLOT_DAWRX(r1)
	mtspr SPRN_CIABR, r5
	mtspr SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	ld r5, STACK_SLOT_TID(r1)
	ld r6, STACK_SLOT_PSSCR(r1)
	ld r7, STACK_SLOT_PID(r1)
	ld r8, STACK_SLOT_IAMR(r1)
	mtspr SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld r5,HSTATE_KVM_VCORE(r13)
	ld r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz r3,HSTATE_PTID(r13)
13:	lbz r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz r3,VCORE_ENTRY_EXIT(r5)
	rlwinm r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz r6, VCORE_IN_GUEST(r5)

	/* Primary thread switches back to host partition */
	lwz r7,KVM_HOST_LPID(r4)
	ld r6,KVM_HOST_SDR1(r4)
	li r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr SPRN_SDR1,r6	/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* DPDES and VTB are shared between threads */
	mfspr r7, SPRN_DPDES
	std r7, VCORE_DPDES(r5)
	std r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	bl kvmppc_realmode_hmi_handler
	li r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already resynced
	 * the TB, so there is no need to subtract the guest timebase
	 * offset from the timebase; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * already been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	/* Subtract timebase offset from timebase */
	ld r8,VCORE_TB_OFFSET(r5)
	mftb r6			/* current guest timebase */
	mtspr SPRN_TBU40,r8	/* update upper 40 bits */
	mftb r7			/* check if lower 24 bits overflowed */
	addis r8,r8,0x100	/* if so, increment upper 40 bits */

17:	bl kvmppc_subcore_exit_guest
30:	ld r5,HSTATE_KVM_VCORE(r13)
	ld r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	ld r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb r0,VCORE_IN_GUEST(r5)
19:	lis r8,0x7fff		/* MAX_INT@h */

16:	ld r8,KVM_HOST_LPCR(r4)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld r8,PACA_SLBSHADOWPTR(r13)
	.rept SLB_NUM_BOLTED
	li r3, SLBSHADOW_SAVEAREA
	andis. r7,r5,SLB_ESID_V@h

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmhv_accumulate_time

	/* Unset guest mode */
	li r0, KVM_GUEST_MODE_NONE
	stb r0, HSTATE_IN_GUEST(r13)

	ld r0, SFS+PPC_LR_STKOFF(r1)

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	lbz r0, KVM_RADIX(r3)
	mfspr r6, SPRN_HDSISR
	bne .Lradix_hdsi	/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq 1f			/* if not, send it to the guest */
	andi. r0, r11, MSR_DR	/* data relocation enabled? */
	mfspr r5, SPRN_ASDR	/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)	/* if so, look up SLB */
	li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne 7f			/* if no SLB entry found */
4:	std r4, VCPU_FAULT_DAR(r9)
	stw r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr r3, r9		/* vcpu pointer */
	li r7, 1		/* data fault */
	bl kvmppc_hpte_hv_fault
	ld r9, HSTATE_KVM_VCPU(r13)
	ld r11, VCPU_MSR(r9)
	li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi r3, 0		/* retry the instruction */
	cmpdi r3, -1		/* handle in kernel mode */
	cmpdi r3, -2		/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld r4, VCPU_FAULT_DAR(r9)
1:	li r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr SPRN_DSISR, r6
7:	mtspr SPRN_DAR, r4
	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	bl kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld r7, VCPU_CTR(r9)

3:	ld r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li r0, KVM_GUEST_MODE_SKIP
	stb r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori r4, r3, MSR_DR	/* Enable paging for data */

	/* Store the result */
	stw r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li r0, KVM_GUEST_MODE_HOST_HV
	stb r0, HSTATE_IN_GUEST(r13)

	std r4, VCPU_FAULT_DAR(r9)
	stw r6, VCPU_FAULT_DSISR(r9)
	std r5, VCPU_FAULT_GPA(r9)

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	lbz r0, KVM_RADIX(r3)
	bne .Lradix_hisi	/* for radix, just save ASDR */
	andis. r0, r11, SRR1_ISI_NOPT@h
	andi. r0, r11, MSR_IR	/* instruction relocation enabled? */
	mfspr r5, SPRN_ASDR	/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)	/* if so, look up SLB */
	li r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne 7f			/* if no SLB entry found */

	/* Search the hash table. */
	mr r3, r9		/* vcpu pointer */
	li r7, 0		/* instruction fault */
	bl kvmppc_hpte_hv_fault
	ld r9, HSTATE_KVM_VCPU(r13)
	ld r11, VCPU_MSR(r9)
	li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi r3, 0		/* retry the instruction */
	beq fast_interrupt_c_return
	cmpdi r3, -1		/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr SPRN_SRR0, r10
	mtspr SPRN_SRR1, r11
	bl kvmppc_msr_interrupt
	b fast_interrupt_c_return

3:	ld r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld r5, KVM_VRMA_SLB_V(r6)

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne sc_1_fast_return
	cmpldi r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi r0, r3, 8		/* r0 = (r3 / 4) >> 6 */
	sldi r0, r0, 3		/* index into kvm->arch.enabled_hcalls[] */
	ld r0, KVM_ENABLED_HCALLS(r4)
	rlwinm r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
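	/*
	 * In C, the enabled-hcall test built up here is roughly:
	 *	word = kvm->arch.enabled_hcalls[(req / 4) >> 6];
	 *	enabled = (word >> ((req / 4) & 0x3f)) & 1;
	 * (hcall numbers are multiples of 4, hence the division by 4).
	 */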
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr r3,r9		/* get vcpu pointer */
	ld r4,VCPU_GPR(R4)(r9)
	beq hcall_real_fallback
	ld r4,HSTATE_KVM_VCPU(r13)
	std r3,VCPU_GPR(R3)(r4)

	li r10, BOOK3S_INTERRUPT_SYSCALL
	bl kvmppc_msr_interrupt

	/* We've attempted a real mode hcall, but it's punted it back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li r12,BOOK3S_INTERRUPT_SYSCALL
	ld r9, HSTATE_KVM_VCPU(r13)
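/*
 * Each table entry below is the 32-bit offset of a real-mode handler
 * from hcall_real_table (0 means no real-mode handler); the table is
 * indexed by hcall number / 4, matching the bounds check above.
 */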
	.globl	hcall_real_table
hcall_real_table:
	.long 0			/* 0 - unused */
	.long DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long DOTSYM(kvmppc_h_read) - hcall_real_table
	.long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long 0			/* 0x24 - H_SET_SPRG0 */
	.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
	.long 0			/* 0x64 - H_EOI */
	.long 0			/* 0x68 - H_CPPR */
	.long 0			/* 0x6c - H_IPI */
	.long 0			/* 0x70 - H_IPOLL */
	.long 0			/* 0x74 - H_XIRR */
	.long DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
	.long 0			/* 0x2fc - H_XIRR_X */
	.long DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi. r0, r5, DABRX_USER | DABRX_KERNEL
	li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std r4,VCPU_DABR(r3)
	stw r5, VCPU_DABRX(r3)
	mtspr SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr SPRN_DABR,r4

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi r5, r4, 2, DAWRX_WT
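	/*
	 * The two rlwimi instructions rotate the DABR's read/write and
	 * translation bits into the positions DAWRX expects (DR/DW/WT),
	 * so a compat-mode H_SET_DABR behaves like H_SET_DAWR.
	 */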
	std r4, VCPU_DAWR(r3)
	std r5, VCPU_DAWRX(r3)
	mtspr SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std r11,VCPU_MSR(r3)
	stb r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz r5,VCPU_PRODDED(r3)
	bne kvm_cede_prodded
	li r12,0		/* set trap to 0 to say hcall is handled */
	stw r12,VCPU_TRAP(r3)
	std r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld r5,HSTATE_KVM_VCORE(r13)
	lbz r6,HSTATE_PTID(r13)
	lwz r8,VCORE_ENTRY_EXIT(r5)
	addi r6,r5,VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	stb r0,HSTATE_NAPPING(r13)
	lwz r7,VCORE_ENTRY_EXIT(r5)
	bge 33f			/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r3)
	std r15, VCPU_GPR(R15)(r3)
	std r16, VCPU_GPR(R16)(r3)
	std r17, VCPU_GPR(R17)(r3)
	std r18, VCPU_GPR(R18)(r3)
	std r19, VCPU_GPR(R19)(r3)
	std r20, VCPU_GPR(R20)(r3)
	std r21, VCPU_GPR(R21)(r3)
	std r22, VCPU_GPR(R22)(r3)
	std r23, VCPU_GPR(R23)(r3)
	std r24, VCPU_GPR(R24)(r3)
	std r25, VCPU_GPR(R25)(r3)
	std r26, VCPU_GPR(R26)(r3)
	std r27, VCPU_GPR(R27)(r3)
	std r28, VCPU_GPR(R28)(r3)
	std r29, VCPU_GPR(R29)(r3)
	std r30, VCPU_GPR(R30)(r3)
	std r31, VCPU_GPR(R31)(r3)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ld r9, HSTATE_KVM_VCPU(r13)
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */

	/* On P9 check whether the guest has large decrementer mode enabled */
	ld r6, HSTATE_KVM_VCORE(r13)
	ld r6, VCORE_LPCR(r6)
	andis. r6, r6, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save expiry time of guest decrementer */
	ld r4, HSTATE_KVM_VCPU(r13)
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r6, VCORE_TB_OFFSET(r5)
	subf r3, r6, r3		/* convert to host TB value */
	std r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld r4, HSTATE_KVM_VCPU(r13)
	addi r3, r4, VCPU_TB_CEDE
	bl kvmhv_accumulate_time

	lis r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */
	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr r0, SPRN_CTRLF
	mtspr SPRN_CTRLT, r0

	stb r0,HSTATE_HWTHREAD_REQ(r13)
	ori r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori r5, r5, LPCR_PECEDH
	rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li r4, LPCR_PECE_HVEE@higher
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std r0, HSTATE_SCRATCH0(r13)
	ld r0, HSTATE_SCRATCH0(r13)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/* get vcpu pointer */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMINTR
	bl kvmhv_accumulate_time

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	bl kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)

	/* load up FP state */

	/* Restore guest decrementer */
	ld r3, VCPU_DEC_EXPIRES(r4)
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r6, VCORE_TB_OFFSET(r5)
	add r3, r3, r6		/* convert host TB to guest TB value */

	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason
	 *
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld r5,HSTATE_KVM_VCORE(r13)
	lbz r7,HSTATE_PTID(r13)
	addi r6,r5,VCORE_NAPPING_THREADS
	stb r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz r0,VCORE_ENTRY_EXIT(r5)
	b kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
	stb r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	ld r9, HSTATE_KVM_VCPU(r13)

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr r3, r9		/* get vcpu pointer */
	bl kvmppc_realmode_machine_check
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all the MCE errors
	 * (handled/unhandled) by exiting the guest with a KVM_EXIT_NMI exit
	 * reason.  This new approach injects machine check errors into the
	 * guest address space, with additional information in the form of an
	 * RTAS event, thus enabling the guest kernel to handle such errors
	 * suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled errors (non-fatal), just go back to guest execution
	 * with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0), deliver it to the
	 * guest as a machine check, causing the guest to crash.
	 */
	ld r11, VCPU_MSR(r9)
	rldicl. r0, r11, 64-MSR_HV_LG, 63	/* check if it happened in HV mode */
	bne mc_cont		/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld r10, VCPU_KVM(r9)
	lbz r10, KVM_FWNMI(r10)
	cmpdi r10, 1		/* FWNMI capable? */
	beq mc_cont		/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi. r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq 1f			/* Deliver a machine check to guest */
	cmpdi r3, 0		/* Did we handle MCE ? */
	bne 2f			/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl kvmppc_msr_interrupt
2:	b fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	rlwinm r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi r6, 8		/* was it an external interrupt? */
	beq 7f			/* if so, see what it was */
	cmpwi r6, 6		/* was it the decrementer? */
	cmpwi r6, 5		/* privileged doorbell? */
	cmpwi r6, 3		/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi r6, 0xa		/* Hypervisor maintenance ? */
	li r3, 1		/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
	lbz r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li r12, BOOK3S_INTERRUPT_HMI

	/* external interrupt - create a stack frame so we can call C */
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -PPC_MIN_STKFRM(r1)
	li r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to the host to complete handling the
	 * interrupt.  Trap reason is expected in r12 by the guest
	 * exit code.
	 */
	li r12, BOOK3S_INTERRUPT_HV_RM_HARD
	ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi r1, r1, PPC_MIN_STKFRM

/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	mfspr r6,SPRN_VRSAVE
	stw r6,VCPU_VRSAVE(r31)

/*
 * Load up FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	oris r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	lwz r7,VCPU_VRSAVE(r31)
	mtspr SPRN_VRSAVE,r7

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
	std r0, PPC_LR_STKOFF(r1)
	rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq 1f			/* TM not active in guest. */
	std r1, HSTATE_HOST_R1(r13)
	li r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* All GPRs are volatile at this point. */

	/* Temporarily store r13 and r9 so we have some regs to play with */
	std r9, PACATMSCRATCH(r13)
	ld r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std r29, VCPU_GPRS_TM(29)(r9)
	std r30, VCPU_GPRS_TM(30)(r9)
	std r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr r30, SPRN_DSCR
	ld r29, HSTATE_DSCR(r13)
	mtspr SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	.if (reg != 9) && (reg != 13)
	std reg, VCPU_GPRS_TM(reg)(r9)

	/* ... now save r13 */
	std r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld r4, PACATMSCRATCH(r13)
	std r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	/* Save away checkpointed SPRs. */
	std r31, VCPU_PPR_TM(r9)
	std r30, VCPU_DSCR_TM(r9)
	std r5, VCPU_LR_TM(r9)
	stw r6, VCPU_CR_TM(r9)
	std r7, VCPU_CTR_TM(r9)
	std r8, VCPU_AMR_TM(r9)
	std r10, VCPU_TAR_TM(r9)
	std r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz r12, VCPU_TRAP(r9)

	addi r3, r9, VCPU_FPRS_TM
	addi r3, r9, VCPU_VRS_TM
	mfspr r6, SPRN_VRSAVE
	stw r6, VCPU_VRSAVE_TM(r9)
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr r5, SPRN_TFHAR
	mfspr r6, SPRN_TFIAR
	mfspr r7, SPRN_TEXASR
	std r5, VCPU_TFHAR(r9)
	std r6, VCPU_TFIAR(r9)
	std r7, VCPU_TEXASR(r9)

	ld r0, PPC_LR_STKOFF(r1)

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
	std r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld r5, VCPU_TFHAR(r4)
	ld r6, VCPU_TFIAR(r4)
	ld r7, VCPU_TEXASR(r4)
	mtspr SPRN_TFHAR, r5
	mtspr SPRN_TFIAR, r6
	mtspr SPRN_TEXASR, r7

	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */
	std r1, HSTATE_HOST_R1(r13)
	/*
	 * Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * machine.
	 */
	oris r7, r7, (TEXASR_FS)@h
	mtspr SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * other register state.
	 */
	addi r3, r31, VCPU_FPRS_TM
	addi r3, r31, VCPU_VRS_TM
	lwz r7, VCPU_VRSAVE_TM(r4)
	mtspr SPRN_VRSAVE, r7

	ld r5, VCPU_LR_TM(r4)
	lwz r6, VCPU_CR_TM(r4)
	ld r7, VCPU_CTR_TM(r4)
	ld r8, VCPU_AMR_TM(r4)
	ld r9, VCPU_TAR_TM(r4)
	ld r10, VCPU_XER_TM(r4)
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld r29, VCPU_DSCR_TM(r4)
	ld r30, VCPU_PPR_TM(r4)

	std r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */

	/* Load GPRs r0-r28 */
	ld reg, VCPU_GPRS_TM(reg)(r31)

	mtspr SPRN_DSCR, r29

	/* Load final GPRs */
	ld 29, VCPU_GPRS_TM(29)(r31)
	ld 30, VCPU_GPRS_TM(30)(r31)
	ld 31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld r29, HSTATE_DSCR(r13)
	mtspr SPRN_DSCR, r29
	ld r4, HSTATE_KVM_VCPU(r13)
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	ld r0, PPC_LR_STKOFF(r1)

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi r0, 2		/* Check if we are in transactional state.. */
	ld r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
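	/*
	 * MSR[TS] encoding: 0b10 = transactional, 0b01 = suspended.
	 * Interrupt delivery moves a transactional thread to suspended,
	 * so a TS value of 2 is replaced with 1 before being deposited
	 * into the new MSR; any other TS value is deposited unchanged.
	 */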
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr SPRN_MMCR2, r3
	lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr SPRN_MMCR0, r3

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r6, VCORE_IN_GUEST(r5)
	beq 5f			/* if in guest, need to */
	ld r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
	std r3, VCPU_CUR_ACTIVITY(r4)
	std r5, VCPU_ACTIVITY_START(r4)

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r8, VCORE_IN_GUEST(r5)
	beq 4f			/* if in guest, need to */
	ld r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld r5, VCPU_CUR_ACTIVITY(r4)
	ld r6, VCPU_ACTIVITY_START(r4)
	std r3, VCPU_CUR_ACTIVITY(r4)
	std r7, VCPU_ACTIVITY_START(r4)
	ld r8, TAS_SEQCOUNT(r5)
	std r8, TAS_SEQCOUNT(r5)
	ld r7, TAS_TOTAL(r5)
	std r7, TAS_TOTAL(r5)
3:	std r3, TAS_MIN(r5)
	std r8, TAS_SEQCOUNT(r5)