1 /* SPDX-License-Identifier: GPL-2.0-only */
4 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
6 * Derived from book3s_rmhandlers.S and other files, which are:
8 * Copyright SUSE Linux Products GmbH 2009
10 * Authors: Alexander Graf <agraf@suse.de>
13 #include <asm/ppc_asm.h>
14 #include <asm/kvm_asm.h>
18 #include <asm/ptrace.h>
19 #include <asm/hvcall.h>
20 #include <asm/asm-offsets.h>
21 #include <asm/exception-64s.h>
22 #include <asm/kvm_book3s_asm.h>
23 #include <asm/book3s/64/mmu-hash.h>
24 #include <asm/export.h>
27 #include <asm/xive-regs.h>
28 #include <asm/thread_info.h>
29 #include <asm/asm-compat.h>
30 #include <asm/feature-fixups.h>
31 #include <asm/cpuidle.h>
33 /* Sign-extend HDEC if not on POWER9 */
34 #define EXTEND_HDEC(reg) \
37 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
39 /* Values in HSTATE_NAPPING(r13) */
40 #define NAPPING_CEDE 1
41 #define NAPPING_NOVCPU 2
42 #define NAPPING_UNSPLIT 3
44 /* Stack frame offsets for kvmppc_hv_entry */
46 #define STACK_SLOT_TRAP (SFS-4)
47 #define STACK_SLOT_SHORT_PATH (SFS-8)
48 #define STACK_SLOT_TID (SFS-16)
49 #define STACK_SLOT_PSSCR (SFS-24)
50 #define STACK_SLOT_PID (SFS-32)
51 #define STACK_SLOT_IAMR (SFS-40)
52 #define STACK_SLOT_CIABR (SFS-48)
53 #define STACK_SLOT_DAWR (SFS-56)
54 #define STACK_SLOT_DAWRX (SFS-64)
55 #define STACK_SLOT_HFSCR (SFS-72)
56 #define STACK_SLOT_AMR (SFS-80)
57 #define STACK_SLOT_UAMOR (SFS-88)
58 /* the following is used by the P9 short path */
59 #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
62 * Call kvmppc_hv_entry in real mode.
63 * Must be called with interrupts hard-disabled.
67 * LR = return address to continue at after eventually re-enabling MMU
69 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
71 std r0, PPC_LR_STKOFF(r1)
74 std r10, HSTATE_HOST_MSR(r13)
75 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
80 mtmsrd r0,1 /* clear RI in MSR */
87 /* On P9, do LPCR setting, if necessary */
88 ld r3, HSTATE_SPLIT_MODE(r13)
91 lwz r4, KVM_SPLIT_DO_SET(r3)
97 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
99 ld r4, HSTATE_KVM_VCPU(r13)
102 /* Back from guest - restore host state and return to caller */
105 /* Restore host DABR and DABRX */
106 ld r5,HSTATE_DABR(r13)
110 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
113 ld r3,PACA_SPRG_VDSO(r13)
114 mtspr SPRN_SPRG_VDSO_WRITE,r3
116 /* Reload the host's PMU registers */
117 bl kvmhv_load_host_pmu
120 * Reload DEC. HDEC interrupts were disabled when
121 * we reloaded the host's LPCR value.
123 ld r3, HSTATE_DECEXP(r13)
128 /* hwthread_req may have got set by cede or no vcpu, so clear it */
130 stb r0, HSTATE_HWTHREAD_REQ(r13)
133	 * For external interrupts we need to call the Linux
134	 * handler to process the interrupt. We do that by jumping
135	 * to absolute address 0x500.
136 * The [h]rfid at the end of the handler will return to
137 * the book3s_hv_interrupts.S code. For other interrupts
138 * we do the rfid to get back to the book3s_hv_interrupts.S
141 ld r8, 112+PPC_LR_STKOFF(r1)
143 ld r7, HSTATE_HOST_MSR(r13)
145 /* Return the trap number on this thread as the return value */
149 * If we came back from the guest via a relocation-on interrupt,
150 * we will be in virtual mode at this point, which makes it a
151 * little easier to get back to the caller.
154 andi. r0, r0, MSR_IR /* in real mode? */
157 /* RFI into the highmem handler */
161 mtmsrd r6, 1 /* Clear RI in MSR */
166 /* Virtual-mode return */
171 kvmppc_primary_no_guest:
172 /* We handle this much like a ceded vcpu */
173 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
174 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
175 /* HDEC value came from DEC in the first place, it will fit */
179 * Make sure the primary has finished the MMU switch.
180 * We should never get here on a secondary thread, but
181 * check it for robustness' sake.
183 ld r5, HSTATE_KVM_VCORE(r13)
184 65: lbz r0, VCORE_IN_GUEST(r5)
191 /* set our bit in napping_threads */
192 ld r5, HSTATE_KVM_VCORE(r13)
193 lbz r7, HSTATE_PTID(r13)
196 addi r6, r5, VCORE_NAPPING_THREADS
201 /* order napping_threads update vs testing entry_exit_map */
204 lwz r7, VCORE_ENTRY_EXIT(r5)
206 bge kvm_novcpu_exit /* another thread already exiting */
207 li r3, NAPPING_NOVCPU
208 stb r3, HSTATE_NAPPING(r13)
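/*
 * The napping protocol above, as a rough C sketch (illustrative only;
 * the atomic helper and exact field layout are assumptions):
 *
 *	do {
 *		old = vcore->napping_threads;
 *	} while (cmpxchg(&vcore->napping_threads, old,
 *			 old | (1 << ptid)) != old);
 *	smp_mb();				// order vs. entry_exit_map read
 *	if (vcore->entry_exit_map >= 0x100)	// any exit bits set?
 *		goto exit;			// another thread already exiting
 */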
210 li r3, 0 /* Don't wake on privileged (OS) doorbell */
215 * Entered from kvm_start_guest if kvm_hstate.napping is set
221 ld r1, HSTATE_HOST_R1(r13)
222 ld r5, HSTATE_KVM_VCORE(r13)
224 stb r0, HSTATE_NAPPING(r13)
226 /* check the wake reason */
227 bl kvmppc_check_wake_reason
230 * Restore volatile registers since we could have called
231 * a C routine in kvmppc_check_wake_reason.
234 ld r5, HSTATE_KVM_VCORE(r13)
236 /* see if any other thread is already exiting */
237 lwz r0, VCORE_ENTRY_EXIT(r5)
241 /* clear our bit in napping_threads */
242 lbz r7, HSTATE_PTID(r13)
245 addi r6, r5, VCORE_NAPPING_THREADS
251 /* See if the wake reason means we need to exit */
255 /* See if our timeslice has expired (HDEC is negative) */
258 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
262 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
263 ld r4, HSTATE_KVM_VCPU(r13)
265 beq kvmppc_primary_no_guest
267 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
268 addi r3, r4, VCPU_TB_RMENTRY
269 bl kvmhv_start_timing
274 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
275 ld r4, HSTATE_KVM_VCPU(r13)
278 addi r3, r4, VCPU_TB_RMEXIT
279 bl kvmhv_accumulate_time
282 stw r12, STACK_SLOT_TRAP(r1)
283 bl kvmhv_commence_exit
285 b kvmhv_switch_to_host
288	 * We come in here when woken from the Linux offline idle code.
290 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
292 _GLOBAL(idle_kvm_start_guest)
293 ld r4,PACAEMERGSP(r13)
299 subi r1,r4,STACK_FRAME_OVERHEAD
303 * Could avoid this and pass it through in r3. For now,
304 * code expects it to be in SRR1.
309 stb r0,PACA_FTRACE_ENABLED(r13)
311 li r0,KVM_HWTHREAD_IN_KVM
312 stb r0,HSTATE_HWTHREAD_STATE(r13)
314 /* kvm cede / napping does not come through here */
315 lbz r0,HSTATE_NAPPING(r13)
322 stb r0, HSTATE_NAPPING(r13)
327 * We weren't napping due to cede, so this must be a secondary
328 * thread being woken up to run a guest, or being woken up due
329 * to a stray IPI. (Or due to some machine check or hypervisor
330 * maintenance interrupt while the core is in KVM.)
333 /* Check the wake reason in SRR1 to see why we got here */
334 bl kvmppc_check_wake_reason
336 * kvmppc_check_wake_reason could invoke a C routine, but we
337 * have no volatile registers to restore when we return.
343 /* get vcore pointer, NULL if we have nothing to run */
344 ld r5,HSTATE_KVM_VCORE(r13)
346 /* if we have no vcore to run, go back to sleep */
349 kvm_secondary_got_guest:
351 /* Set HSTATE_DSCR(r13) to something sensible */
352 ld r6, PACA_DSCR_DEFAULT(r13)
353 std r6, HSTATE_DSCR(r13)
355 /* On thread 0 of a subcore, set HDEC to max */
356 lbz r4, HSTATE_PTID(r13)
359 LOAD_REG_ADDR(r6, decrementer_max)
362 /* and set per-LPAR registers, if doing dynamic micro-threading */
363 ld r6, HSTATE_SPLIT_MODE(r13)
367 ld r0, KVM_SPLIT_RPR(r6)
369 ld r0, KVM_SPLIT_PMMAR(r6)
371 ld r0, KVM_SPLIT_LDBAR(r6)
375 /* On P9 we use the split_info for coordinating LPCR changes */
376 lwz r4, KVM_SPLIT_DO_SET(r6)
383 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
385 /* Order load of vcpu after load of vcore */
387 ld r4, HSTATE_KVM_VCPU(r13)
390 /* Back from the guest, go back to nap */
391 /* Clear our vcpu and vcore pointers so we don't come back in early */
393 std r0, HSTATE_KVM_VCPU(r13)
395 * Once we clear HSTATE_KVM_VCORE(r13), the code in
396 * kvmppc_run_core() is going to assume that all our vcpu
397 * state is visible in memory. This lwsync makes sure
401 std r0, HSTATE_KVM_VCORE(r13)
404 * All secondaries exiting guest will fall through this path.
405 * Before proceeding, just check for HMI interrupt and
406 * invoke opal hmi handler. By now we are sure that the
407 * primary thread on this core/subcore has already made partition
408 * switch/TB resync and we are good to call opal hmi handler.
410 cmpwi r12, BOOK3S_INTERRUPT_HMI
413 li r3,0 /* NULL argument */
414 bl hmi_exception_realmode
416 * At this point we have finished executing in the guest.
417 * We need to wait for hwthread_req to become zero, since
418 * we may not turn on the MMU while hwthread_req is non-zero.
419 * While waiting we also need to check if we get given a vcpu to run.
422 lbz r3, HSTATE_HWTHREAD_REQ(r13)
426 li r0, KVM_HWTHREAD_IN_KERNEL
427 stb r0, HSTATE_HWTHREAD_STATE(r13)
428 /* need to recheck hwthread_req after a barrier, to avoid race */
430 lbz r3, HSTATE_HWTHREAD_REQ(r13)
435 * Jump to idle_return_gpr_loss, which returns to the
436 * idle_kvm_start_guest caller.
440 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
442 /* set up r3 for return */
445 addi r1, r1, STACK_FRAME_OVERHEAD
454 ld r5, HSTATE_KVM_VCORE(r13)
457 ld r3, HSTATE_SPLIT_MODE(r13)
460 lwz r0, KVM_SPLIT_DO_SET(r3)
463 lwz r0, KVM_SPLIT_DO_RESTORE(r3)
466 lbz r0, KVM_SPLIT_DO_NAP(r3)
472 b kvm_secondary_got_guest
474 54: li r0, KVM_HWTHREAD_IN_KVM
475 stb r0, HSTATE_HWTHREAD_STATE(r13)
479 /* Set LPCR, LPIDR etc. on P9 */
487 bl kvmhv_p9_restore_lpcr
492 * Here the primary thread is trying to return the core to
493 * whole-core mode, so we need to nap.
497	 * When secondaries are napping in kvm_unsplit_nap() with
498	 * hwthread_req = 1, an HMI is ignored even though the subcores have
499	 * already exited the guest. Hence the HMI keeps waking the secondaries
500	 * from nap in a loop, and they always go back to nap since no vcore
501	 * is assigned to them. This makes it impossible for the primary
502	 * thread to get hold of the secondary threads, resulting in a soft
503	 * lockup in the KVM path.
505 * Let us check if HMI is pending and handle it before we go to nap.
507 cmpwi r12, BOOK3S_INTERRUPT_HMI
509 li r3, 0 /* NULL argument */
510 bl hmi_exception_realmode
513	 * Ensure that a secondary doesn't nap while it has
514	 * its vcore pointer set.
516 sync /* matches smp_mb() before setting split_info.do_nap */
517 ld r0, HSTATE_KVM_VCORE(r13)
520 /* clear any pending message */
522 lis r6, (PPC_DBELL_SERVER << (63-36))@h
524 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
525 /* Set kvm_split_mode.napped[tid] = 1 */
526 ld r3, HSTATE_SPLIT_MODE(r13)
528 lbz r4, HSTATE_TID(r13)
529 addi r4, r4, KVM_SPLIT_NAPPED
531 /* Check the do_nap flag again after setting napped[] */
533 lbz r0, KVM_SPLIT_DO_NAP(r3)
536 li r3, NAPPING_UNSPLIT
537 stb r3, HSTATE_NAPPING(r13)
538 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
540 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
547 /******************************************************************************
551 *****************************************************************************/
553 .global kvmppc_hv_entry
558 * R4 = vcpu pointer (or NULL)
563	 * all other volatile GPRs = free
564 * Does not preserve non-volatile GPRs or CR fields
567 std r0, PPC_LR_STKOFF(r1)
570 /* Save R1 in the PACA */
571 std r1, HSTATE_HOST_R1(r13)
573 li r6, KVM_GUEST_MODE_HOST_HV
574 stb r6, HSTATE_IN_GUEST(r13)
576 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
577 /* Store initial timestamp */
580 addi r3, r4, VCPU_TB_RMENTRY
581 bl kvmhv_start_timing
585 ld r5, HSTATE_KVM_VCORE(r13)
586 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
589 * POWER7/POWER8 host -> guest partition switch code.
590 * We don't have to lock against concurrent tlbies,
591 * but we do have to coordinate across hardware threads.
593 /* Set bit in entry map iff exit map is zero. */
595 lbz r6, HSTATE_PTID(r13)
597 addi r8, r5, VCORE_ENTRY_EXIT
599 cmpwi r3, 0x100 /* any threads starting to exit? */
600 bge secondary_too_late /* if so we're too late to the party */
605 /* Primary thread switches to guest partition. */
612 li r0,LPID_RSVD /* switch to reserved LPID */
615 mtspr SPRN_SDR1,r6 /* switch to partition page table */
616 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
620 /* See if we need to flush the TLB. */
621 mr r3, r9 /* kvm pointer */
622 lhz r4, PACAPACAINDEX(r13) /* physical cpu number */
623 li r5, 0 /* nested vcpu pointer */
624 bl kvmppc_check_need_tlb_flush
626 ld r5, HSTATE_KVM_VCORE(r13)
628 /* Add timebase offset onto timebase */
629 22: ld r8,VCORE_TB_OFFSET(r5)
632 std r8, VCORE_TB_OFFSET_APPL(r5)
633 mftb r6 /* current host timebase */
635 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
636 mftb r7 /* check if lower 24 bits overflowed */
641 addis r8,r8,0x100 /* if so, increment upper 40 bits */
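/*
 * As a C-level sketch of this TBU40 update (mtspr()/mftb() stand in
 * for the SPR accessors):
 *
 *	new_tb = mftb() + vc->tb_offset;
 *	mtspr(SPRN_TBU40, new_tb);		// writes the upper 40 bits only
 *	if ((mftb() & 0xffffff) < (new_tb & 0xffffff))
 *		mtspr(SPRN_TBU40, new_tb + 0x1000000);	// re-add the lost carry
 */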
644 /* Load guest PCR value to select appropriate compat mode */
645 37: ld r7, VCORE_PCR(r5)
652 /* DPDES and VTB are shared between threads */
653 ld r8, VCORE_DPDES(r5)
657 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
659 /* Mark the subcore state as inside guest */
660 bl kvmppc_subcore_enter_guest
662 ld r5, HSTATE_KVM_VCORE(r13)
663 ld r4, HSTATE_KVM_VCPU(r13)
665 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
667 /* Do we have a guest vcpu to run? */
669 beq kvmppc_primary_no_guest
671 /* Increment yield count if they have a VPA */
675 li r6, LPPACA_YIELDCOUNT
680 stb r6, VCPU_VPA_DIRTY(r4)
683 /* Save purr/spurr */
686 std r5,HSTATE_PURR(r13)
687 std r6,HSTATE_SPURR(r13)
693 /* Save host values of some registers */
698 std r5, STACK_SLOT_TID(r1)
699 std r6, STACK_SLOT_PSSCR(r1)
700 std r7, STACK_SLOT_PID(r1)
702 std r5, STACK_SLOT_HFSCR(r1)
703 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
709 std r5, STACK_SLOT_CIABR(r1)
710 std r6, STACK_SLOT_DAWR(r1)
711 std r7, STACK_SLOT_DAWRX(r1)
712 std r8, STACK_SLOT_IAMR(r1)
713 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
716 std r5, STACK_SLOT_AMR(r1)
718 std r6, STACK_SLOT_UAMOR(r1)
721 /* Set partition DABR */
722 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
723 lwz r5,VCPU_DABRX(r4)
728 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
730 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
732 * Branch around the call if both CPU_FTR_TM and
733 * CPU_FTR_P9_TM_HV_ASSIST are off.
737 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
739 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
743 li r5, 0 /* don't preserve non-vol regs */
744 bl kvmppc_restore_tm_hv
746 ld r4, HSTATE_KVM_VCPU(r13)
750 /* Load guest PMU registers; r4 = vcpu pointer here */
752 bl kvmhv_load_guest_pmu
754 /* Load up FP, VMX and VSX registers */
755 ld r4, HSTATE_KVM_VCPU(r13)
758 ld r14, VCPU_GPR(R14)(r4)
759 ld r15, VCPU_GPR(R15)(r4)
760 ld r16, VCPU_GPR(R16)(r4)
761 ld r17, VCPU_GPR(R17)(r4)
762 ld r18, VCPU_GPR(R18)(r4)
763 ld r19, VCPU_GPR(R19)(r4)
764 ld r20, VCPU_GPR(R20)(r4)
765 ld r21, VCPU_GPR(R21)(r4)
766 ld r22, VCPU_GPR(R22)(r4)
767 ld r23, VCPU_GPR(R23)(r4)
768 ld r24, VCPU_GPR(R24)(r4)
769 ld r25, VCPU_GPR(R25)(r4)
770 ld r26, VCPU_GPR(R26)(r4)
771 ld r27, VCPU_GPR(R27)(r4)
772 ld r28, VCPU_GPR(R28)(r4)
773 ld r29, VCPU_GPR(R29)(r4)
774 ld r30, VCPU_GPR(R30)(r4)
775 ld r31, VCPU_GPR(R31)(r4)
777 /* Switch DSCR to guest value */
782 /* Skip next section on POWER7 */
784 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
785 /* Load up POWER8-specific registers */
787 lwz r6, VCPU_PSPB(r4)
793	 * Handle the broken-DAWR case by not writing it. This means we
794 * can still store the DAWR register for migration.
796 LOAD_REG_ADDR(r5, dawr_force_enable)
801 ld r6, VCPU_DAWRX(r4)
805 ld r7, VCPU_CIABR(r4)
810 ld r8, VCPU_EBBHR(r4)
813 ld r5, VCPU_EBBRR(r4)
814 ld r6, VCPU_BESCR(r4)
815 lwz r7, VCPU_GUEST_PID(r4)
822 /* POWER8-only registers */
823 ld r5, VCPU_TCSCR(r4)
825 ld r7, VCPU_CSIGR(r4)
833 /* POWER9-only registers */
835 ld r6, VCPU_PSSCR(r4)
836 lbz r8, HSTATE_FAKE_SUSPEND(r13)
837 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
838 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
839 ld r7, VCPU_HFSCR(r4)
843 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
846 ld r5, VCPU_SPRG0(r4)
847 ld r6, VCPU_SPRG1(r4)
848 ld r7, VCPU_SPRG2(r4)
849 ld r8, VCPU_SPRG3(r4)
855 /* Load up DAR and DSISR */
857 lwz r6, VCPU_DSISR(r4)
861 /* Restore AMR and UAMOR, set AMOR to all 1s */
869 /* Restore state of CTRL run bit; assume 1 on entry */
877 /* Secondary threads wait for primary to have done partition switch */
878 ld r5, HSTATE_KVM_VCORE(r13)
879 lbz r6, HSTATE_PTID(r13)
882 lbz r0, VCORE_IN_GUEST(r5)
886 20: lwz r3, VCORE_ENTRY_EXIT(r5)
889 lbz r0, VCORE_IN_GUEST(r5)
900 * Set the decrementer to the guest decrementer.
902 ld r8,VCPU_DEC_EXPIRES(r4)
903 /* r8 is a host timebase value here, convert to guest TB */
904 ld r5,HSTATE_KVM_VCORE(r13)
905 ld r6,VCORE_TB_OFFSET_APPL(r5)
911 /* Check if HDEC expires soon */
914 cmpdi r3, 512 /* 1 microsecond */
917 /* For hash guest, clear out and reload the SLB */
919 lbz r0, KVM_RADIX(r6)
927 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
928 lwz r5,VCPU_SLB_MAX(r4)
933 1: ld r8,VCPU_SLB_E(r6)
936 addi r6,r6,VCPU_SLB_SIZE
940 #ifdef CONFIG_KVM_XICS
941 /* We are entering the guest on that thread, push VCPU to XIVE */
942 ld r11, VCPU_XIVE_SAVED_STATE(r4)
944 lwz r8, VCPU_XIVE_CAM_WORD(r4)
947 li r7, TM_QW1_OS + TM_WORD2
949 andi. r0, r0, MSR_DR /* in real mode? */
951 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
958 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
965 stb r9, VCPU_XIVE_PUSHED(r4)
969 * We clear the irq_pending flag. There is a small chance of a
970 * race vs. the escalation interrupt happening on another
971 * processor setting it again, but the only consequence is to
972	 * cause a spurious wakeup on the next H_CEDE which is not an
976 stb r0, VCPU_IRQ_PENDING(r4)
979 * In single escalation mode, if the escalation interrupt is
982 lbz r0, VCPU_XIVE_ESC_ON(r4)
985 li r9, XIVE_ESB_SET_PQ_01
986 beq 4f /* in real mode? */
987 ld r10, VCPU_XIVE_ESC_VADDR(r4)
990 4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
994 /* We have a possible subtle race here: The escalation interrupt might
995 * have fired and be on its way to the host queue while we mask it,
996 * and if we unmask it early enough (re-cede right away), there is
997	 * a theoretical possibility that it fires again, thus landing in the
998	 * target queue more than once, which is a big no-no.
1000 * Fortunately, solving this is rather easy. If the above load setting
1001 * PQ to 01 returns a previous value where P is set, then we know the
1002 * escalation interrupt is somewhere on its way to the host. In that
1003 * case we simply don't clear the xive_esc_on flag below. It will be
1004 * eventually cleared by the handler for the escalation interrupt.
1006 * Then, when doing a cede, we check that flag again before re-enabling
1007 * the escalation interrupt, and if set, we abort the cede.
1009 andi. r0, r0, XIVE_ESB_VAL_P
1012 /* Now P is 0, we can clear the flag */
1014 stb r0, VCPU_XIVE_ESC_ON(r4)
1017 #endif /* CONFIG_KVM_XICS */
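/*
 * In C terms, the sequence above amounts to (esb_load() stands in for
 * the MMIO load; the helper name is an assumption for illustration):
 *
 *	old_pq = esb_load(esc_esb_addr + XIVE_ESB_SET_PQ_01);
 *	if (!(old_pq & XIVE_ESB_VAL_P))
 *		vcpu->arch.xive_esc_on = 0;	// nothing in flight; safe to clear
 *	// otherwise leave the flag set; the escalation handler clears it
 */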
1020 stw r0, STACK_SLOT_SHORT_PATH(r1)
1022 deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
1023 /* Check if we can deliver an external or decrementer interrupt now */
1024 ld r0, VCPU_PENDING_EXC(r4)
1026 /* On POWER9, also check for emulated doorbell interrupt */
1027 lbz r3, VCPU_DBELL_REQ(r4)
1029 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1033 bl kvmppc_guest_entry_inject_int
1034 ld r4, HSTATE_KVM_VCPU(r13)
1036 ld r6, VCPU_SRR0(r4)
1037 ld r7, VCPU_SRR1(r4)
1043 ld r11, VCPU_MSR(r4)
1044 /* r11 = vcpu->arch.msr & ~MSR_HV */
1045 rldicl r11, r11, 63 - MSR_HV_LG, 1
1046 rotldi r11, r11, 1 + MSR_HV_LG
1047 ori r11, r11, MSR_ME
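/*
 * The rldicl/rotldi pair is a branch-free way of computing, in C terms:
 *
 *	r11 = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
 *
 * i.e. the guest never runs with MSR_HV set, and always runs with
 * MSR_ME set so that machine checks in the guest are recoverable.
 */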
1057 * R10: value for HSRR0
1058 * R11: value for HSRR1
1063 stb r0,VCPU_CEDED(r4) /* cancel cede */
1064 mtspr SPRN_HSRR0,r10
1065 mtspr SPRN_HSRR1,r11
1067 /* Activate guest mode, so faults get handled by KVM */
1068 li r9, KVM_GUEST_MODE_GUEST_HV
1069 stb r9, HSTATE_IN_GUEST(r13)
1071 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1072 /* Accumulate timing */
1073 addi r3, r4, VCPU_TB_GUEST
1074 bl kvmhv_accumulate_time
1080 ld r5, VCPU_CFAR(r4)
1082 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1085 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1092 ld r1, VCPU_GPR(R1)(r4)
1093 ld r2, VCPU_GPR(R2)(r4)
1094 ld r3, VCPU_GPR(R3)(r4)
1095 ld r5, VCPU_GPR(R5)(r4)
1096 ld r6, VCPU_GPR(R6)(r4)
1097 ld r7, VCPU_GPR(R7)(r4)
1098 ld r8, VCPU_GPR(R8)(r4)
1099 ld r9, VCPU_GPR(R9)(r4)
1100 ld r10, VCPU_GPR(R10)(r4)
1101 ld r11, VCPU_GPR(R11)(r4)
1102 ld r12, VCPU_GPR(R12)(r4)
1103 ld r13, VCPU_GPR(R13)(r4)
1107 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1109	 /* Move canary into DSISR so we can check for it later */
1112 mtspr SPRN_HDSISR, r0
1113 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1115 ld r0, VCPU_GPR(R0)(r4)
1116 ld r4, VCPU_GPR(R4)(r4)
1121 * Enter the guest on a P9 or later system where we have exactly
1122 * one vcpu per vcore and we don't need to go to real mode
1123 * (which implies that host and guest are both using radix MMU mode).
1125 * Most SPRs and all the VSRs have been loaded already.
1127 _GLOBAL(__kvmhv_vcpu_entry_p9)
1128 EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
1130 std r0, PPC_LR_STKOFF(r1)
1134 stw r0, STACK_SLOT_SHORT_PATH(r1)
1136 std r3, HSTATE_KVM_VCPU(r13)
1140 std r1, HSTATE_HOST_R1(r13)
1144 std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1150 ld reg, __VCPU_GPR(reg)(r3)
1155 std r10, HSTATE_HOST_MSR(r13)
1158 b fast_guest_entry_c
1159 guest_exit_short_path:
1161 li r0, KVM_GUEST_MODE_NONE
1162 stb r0, HSTATE_IN_GUEST(r13)
1166 std reg, __VCPU_GPR(reg)(r9)
1172 ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1179 mr r3, r12 /* trap number */
1182 ld r0, PPC_LR_STKOFF(r1)
1185 /* If we are in real mode, do a rfid to get back to the caller */
1187 andi. r5, r4, MSR_IR
1189 rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
1191 ld r10, HSTATE_HOST_MSR(r13)
1192 rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
1193 mtspr SPRN_SRR1, r10
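/*
 * Net effect, as a C sketch (MSR_TS_MASK assumed to cover both TS bits):
 *
 *	srr1 = (hstate->host_msr & ~MSR_TS_MASK) | (mfmsr() & MSR_TS_MASK);
 *
 * i.e. return with the host MSR but keep the current transaction-state
 * bits, so a suspended transaction stays suspended across the rfid.
 */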
1199 stw r12, STACK_SLOT_TRAP(r1)
1202 stw r12, VCPU_TRAP(r4)
1203 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1204 addi r3, r4, VCPU_TB_RMEXIT
1205 bl kvmhv_accumulate_time
1207 11: b kvmhv_switch_to_host
1214 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
1215 12: stw r12, VCPU_TRAP(r4)
1217 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1218 addi r3, r4, VCPU_TB_RMEXIT
1219 bl kvmhv_accumulate_time
1223 /******************************************************************************
1227 *****************************************************************************/
1230 * We come here from the first-level interrupt handlers.
1232 .globl kvmppc_interrupt_hv
1233 kvmppc_interrupt_hv:
1235 * Register contents:
1236 * R12 = (guest CR << 32) | interrupt vector
1238 * guest R12 saved in shadow VCPU SCRATCH0
1239 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
1240 * guest R13 saved in SPRN_SCRATCH0
1242 std r9, HSTATE_SCRATCH2(r13)
1243 lbz r9, HSTATE_IN_GUEST(r13)
1244 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1245 beq kvmppc_bad_host_intr
1246 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1247 cmpwi r9, KVM_GUEST_MODE_GUEST
1248 ld r9, HSTATE_SCRATCH2(r13)
1249 beq kvmppc_interrupt_pr
1251 /* We're now back in the host but in guest MMU context */
1252 li r9, KVM_GUEST_MODE_HOST_HV
1253 stb r9, HSTATE_IN_GUEST(r13)
1255 ld r9, HSTATE_KVM_VCPU(r13)
1257 /* Save registers */
1259 std r0, VCPU_GPR(R0)(r9)
1260 std r1, VCPU_GPR(R1)(r9)
1261 std r2, VCPU_GPR(R2)(r9)
1262 std r3, VCPU_GPR(R3)(r9)
1263 std r4, VCPU_GPR(R4)(r9)
1264 std r5, VCPU_GPR(R5)(r9)
1265 std r6, VCPU_GPR(R6)(r9)
1266 std r7, VCPU_GPR(R7)(r9)
1267 std r8, VCPU_GPR(R8)(r9)
1268 ld r0, HSTATE_SCRATCH2(r13)
1269 std r0, VCPU_GPR(R9)(r9)
1270 std r10, VCPU_GPR(R10)(r9)
1271 std r11, VCPU_GPR(R11)(r9)
1272 ld r3, HSTATE_SCRATCH0(r13)
1273 std r3, VCPU_GPR(R12)(r9)
1274 /* CR is in the high half of r12 */
1278 ld r3, HSTATE_CFAR(r13)
1279 std r3, VCPU_CFAR(r9)
1280 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1282 ld r4, HSTATE_PPR(r13)
1283 std r4, VCPU_PPR(r9)
1284 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1286 /* Restore R1/R2 so we can handle faults */
1287 ld r1, HSTATE_HOST_R1(r13)
1290 mfspr r10, SPRN_SRR0
1291 mfspr r11, SPRN_SRR1
1292 std r10, VCPU_SRR0(r9)
1293 std r11, VCPU_SRR1(r9)
1294 /* trap is in the low half of r12, clear CR from the high half */
1296 andi. r0, r12, 2 /* need to read HSRR0/1? */
1298 mfspr r10, SPRN_HSRR0
1299 mfspr r11, SPRN_HSRR1
1301 1: std r10, VCPU_PC(r9)
1302 std r11, VCPU_MSR(r9)
1306 std r3, VCPU_GPR(R13)(r9)
1309 stw r12,VCPU_TRAP(r9)
1312 * Now that we have saved away SRR0/1 and HSRR0/1,
1313 * interrupts are recoverable in principle, so set MSR_RI.
1314 * This becomes important for relocation-on interrupts from
1315 * the guest, which we can get in radix mode on POWER9.
1320 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1321 addi r3, r9, VCPU_TB_RMINTR
1323 bl kvmhv_accumulate_time
1324 ld r5, VCPU_GPR(R5)(r9)
1325 ld r6, VCPU_GPR(R6)(r9)
1326 ld r7, VCPU_GPR(R7)(r9)
1327 ld r8, VCPU_GPR(R8)(r9)
1330 /* Save HEIR (HV emulation assist reg) in emul_inst
1331	 * if this is an HEI (HV emulation interrupt, e40) */
1332 li r3,KVM_INST_FETCH_FAILED
1333 stw r3,VCPU_LAST_INST(r9)
1334 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1337 11: stw r3,VCPU_HEIR(r9)
1339 /* these are volatile across C function calls */
1340 #ifdef CONFIG_RELOCATABLE
1341 ld r3, HSTATE_SCRATCH1(r13)
1347 std r3, VCPU_CTR(r9)
1348 std r4, VCPU_XER(r9)
1350 /* Save more register state */
1353 std r3, VCPU_DAR(r9)
1354 stw r4, VCPU_DSISR(r9)
1356 /* If this is a page table miss then see if it's theirs or ours */
1357 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1359 std r3, VCPU_FAULT_DAR(r9)
1360 stw r4, VCPU_FAULT_DSISR(r9)
1361 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1364 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1365 /* For softpatch interrupt, go off and do TM instruction emulation */
1366 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1370 /* See if this is a leftover HDEC interrupt */
1371 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1377 bge fast_guest_return
1379 /* See if this is an hcall we can handle in real mode */
1380 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1381 beq hcall_try_real_mode
1383 /* Hypervisor doorbell - exit only if host IPI flag set */
1384 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1389 /* always exit if we're running a nested guest */
1390 ld r0, VCPU_NESTED(r9)
1393 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1394 lbz r0, HSTATE_HOST_IPI(r13)
1396 beq maybe_reenter_guest
1399 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1400 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1402 mfspr r3, SPRN_HFSCR
1403 std r3, VCPU_HFSCR(r9)
1406 /* External interrupt ? */
1407 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1408 beq kvmppc_guest_external
1409 /* See if it is a machine check */
1410 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1411 beq machine_check_realmode
1412 /* Or a hypervisor maintenance interrupt */
1413 cmpwi r12, BOOK3S_INTERRUPT_HMI
1416 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1418 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1419 addi r3, r9, VCPU_TB_RMEXIT
1421 bl kvmhv_accumulate_time
1423 #ifdef CONFIG_KVM_XICS
1424 /* We are exiting, pull the VP from the XIVE */
1425 lbz r0, VCPU_XIVE_PUSHED(r9)
1428 li r7, TM_SPC_PULL_OS_CTX
1431 andi. r0, r0, MSR_DR /* in real mode? */
1433 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1436 /* First load to pull the context, we ignore the value */
1439 /* Second load to recover the context state (Words 0 and 1) */
1442 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1445 /* First load to pull the context, we ignore the value */
1448 /* Second load to recover the context state (Words 0 and 1) */
1450 3: std r11, VCPU_XIVE_SAVED_STATE(r9)
1451 /* Fixup some of the state for the next load */
1454 stb r10, VCPU_XIVE_PUSHED(r9)
1455 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1456 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1459 #endif /* CONFIG_KVM_XICS */
1461 /* If we came in through the P9 short path, go back out to C now */
1462 lwz r0, STACK_SLOT_SHORT_PATH(r1)
1464 bne guest_exit_short_path
1466 /* For hash guest, read the guest SLB and save it away */
1468 lbz r0, KVM_RADIX(r5)
1471 bne 3f /* for radix, save 0 entries */
1472 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1477 andis. r0,r8,SLB_ESID_V@h
1479 add r8,r8,r6 /* put index in */
1481 std r8,VCPU_SLB_E(r7)
1482 std r3,VCPU_SLB_V(r7)
1483 addi r7,r7,VCPU_SLB_SIZE
1487 /* Finally clear out the SLB */
1492 3: stw r5,VCPU_SLB_MAX(r9)
1494 /* load host SLB entries */
1495 BEGIN_MMU_FTR_SECTION
1497 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1498 ld r8,PACA_SLBSHADOWPTR(r13)
1500 .rept SLB_NUM_BOLTED
1501 li r3, SLBSHADOW_SAVEAREA
1505 andis. r7,r5,SLB_ESID_V@h
1513 stw r12, STACK_SLOT_TRAP(r1)
1516 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1517 ld r3, HSTATE_KVM_VCORE(r13)
1520 /* On P9, if the guest has large decr enabled, don't sign extend */
1522 ld r4, VCORE_LPCR(r3)
1523 andis. r4, r4, LPCR_LD@h
1525 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1528 /* r5 is a guest timebase value here, convert to host TB */
1529 ld r4,VCORE_TB_OFFSET_APPL(r3)
1531 std r5,VCPU_DEC_EXPIRES(r9)
1533 /* Increment exit count, poke other threads to exit */
1535 bl kvmhv_commence_exit
1537 ld r9, HSTATE_KVM_VCPU(r13)
1539 /* Stop others sending VCPU interrupts to this physical CPU */
1541 stw r0, VCPU_CPU(r9)
1542 stw r0, VCPU_THREAD_CPU(r9)
1544 /* Save guest CTRL register, set runlatch to 1 */
1546 stw r6,VCPU_CTRL(r9)
1553 * Save the guest PURR/SPURR
1558 ld r8,VCPU_SPURR(r9)
1559 std r5,VCPU_PURR(r9)
1560 std r6,VCPU_SPURR(r9)
1565 * Restore host PURR/SPURR and add guest times
1566 * so that the time in the guest gets accounted.
1568 ld r3,HSTATE_PURR(r13)
1569 ld r4,HSTATE_SPURR(r13)
1577 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1578 /* Save POWER8-specific registers */
1582 std r5, VCPU_IAMR(r9)
1583 stw r6, VCPU_PSPB(r9)
1584 std r7, VCPU_FSCR(r9)
1588 std r7, VCPU_TAR(r9)
1589 mfspr r8, SPRN_EBBHR
1590 std r8, VCPU_EBBHR(r9)
1591 mfspr r5, SPRN_EBBRR
1592 mfspr r6, SPRN_BESCR
1595 std r5, VCPU_EBBRR(r9)
1596 std r6, VCPU_BESCR(r9)
1597 stw r7, VCPU_GUEST_PID(r9)
1598 std r8, VCPU_WORT(r9)
1600 mfspr r5, SPRN_TCSCR
1602 mfspr r7, SPRN_CSIGR
1604 std r5, VCPU_TCSCR(r9)
1605 std r6, VCPU_ACOP(r9)
1606 std r7, VCPU_CSIGR(r9)
1607 std r8, VCPU_TACR(r9)
1610 mfspr r6, SPRN_PSSCR
1611 std r5, VCPU_TID(r9)
1612 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1614 std r6, VCPU_PSSCR(r9)
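/*
 * C sketch of the PSSCR save above (PSSCR_GUEST_VIS is the mask of
 * guest-visible PSSCR fields; the rldicl implements that mask in one
 * instruction):
 *
 *	vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
 */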
1615 /* Restore host HFSCR value */
1616 ld r7, STACK_SLOT_HFSCR(r1)
1617 mtspr SPRN_HFSCR, r7
1618 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1620 * Restore various registers to 0, where non-zero values
1621 * set by the guest could disrupt the host.
1627 mtspr SPRN_TCSCR, r0
1628 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1631 mtspr SPRN_MMCRS, r0
1632 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1634 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
1635 ld r8, STACK_SLOT_IAMR(r1)
1638 8: /* Power7 jumps back in here */
1642 std r6,VCPU_UAMOR(r9)
1643 ld r5,STACK_SLOT_AMR(r1)
1644 ld r6,STACK_SLOT_UAMOR(r1)
1646 mtspr SPRN_UAMOR, r6
1648 /* Switch DSCR back to host value */
1650 ld r7, HSTATE_DSCR(r13)
1651 std r8, VCPU_DSCR(r9)
1654 /* Save non-volatile GPRs */
1655 std r14, VCPU_GPR(R14)(r9)
1656 std r15, VCPU_GPR(R15)(r9)
1657 std r16, VCPU_GPR(R16)(r9)
1658 std r17, VCPU_GPR(R17)(r9)
1659 std r18, VCPU_GPR(R18)(r9)
1660 std r19, VCPU_GPR(R19)(r9)
1661 std r20, VCPU_GPR(R20)(r9)
1662 std r21, VCPU_GPR(R21)(r9)
1663 std r22, VCPU_GPR(R22)(r9)
1664 std r23, VCPU_GPR(R23)(r9)
1665 std r24, VCPU_GPR(R24)(r9)
1666 std r25, VCPU_GPR(R25)(r9)
1667 std r26, VCPU_GPR(R26)(r9)
1668 std r27, VCPU_GPR(R27)(r9)
1669 std r28, VCPU_GPR(R28)(r9)
1670 std r29, VCPU_GPR(R29)(r9)
1671 std r30, VCPU_GPR(R30)(r9)
1672 std r31, VCPU_GPR(R31)(r9)
1675 mfspr r3, SPRN_SPRG0
1676 mfspr r4, SPRN_SPRG1
1677 mfspr r5, SPRN_SPRG2
1678 mfspr r6, SPRN_SPRG3
1679 std r3, VCPU_SPRG0(r9)
1680 std r4, VCPU_SPRG1(r9)
1681 std r5, VCPU_SPRG2(r9)
1682 std r6, VCPU_SPRG3(r9)
1688 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1690 * Branch around the call if both CPU_FTR_TM and
1691 * CPU_FTR_P9_TM_HV_ASSIST are off.
1695 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
1697 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
1701 li r5, 0 /* don't preserve non-vol regs */
1702 bl kvmppc_save_tm_hv
1704 ld r9, HSTATE_KVM_VCPU(r13)
1708 /* Increment yield count if they have a VPA */
1709 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1712 li r4, LPPACA_YIELDCOUNT
1717 stb r3, VCPU_VPA_DIRTY(r9)
1719 /* Save PMU registers if requested */
1720 /* r8 and cr0.eq are live here */
1723 beq 21f /* if no VPA, save PMU stuff anyway */
1724 lbz r4, LPPACA_PMCINUSE(r8)
1725 21: bl kvmhv_save_guest_pmu
1726 ld r9, HSTATE_KVM_VCPU(r13)
1728 /* Restore host values of some registers */
1730 ld r5, STACK_SLOT_CIABR(r1)
1731 ld r6, STACK_SLOT_DAWR(r1)
1732 ld r7, STACK_SLOT_DAWRX(r1)
1733 mtspr SPRN_CIABR, r5
1735 * If the DAWR doesn't work, it's ok to write these here as
1736 * this value should always be zero
1739 mtspr SPRN_DAWRX, r7
1740 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1742 ld r5, STACK_SLOT_TID(r1)
1743 ld r6, STACK_SLOT_PSSCR(r1)
1744 ld r7, STACK_SLOT_PID(r1)
1746 mtspr SPRN_PSSCR, r6
1748 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1750 #ifdef CONFIG_PPC_RADIX_MMU
1752 * Are we running hash or radix ?
1755 lbz r0, KVM_RADIX(r5)
1760 * Radix: do eieio; tlbsync; ptesync sequence in case we
1761 * interrupted the guest between a tlbie and a ptesync.
1767 /* Radix: Handle the case where the guest used an illegal PID */
1768 LOAD_REG_ADDR(r4, mmu_base_pid)
1769 lwz r3, VCPU_GUEST_PID(r9)
1775 * Illegal PID, the HW might have prefetched and cached in the TLB
1776 * some translations for the LPID 0 / guest PID combination which
1777 * Linux doesn't know about, so we need to flush that PID out of
1778 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1779 * the right context.
1785 /* Then do a congruence class local flush */
1787 lwz r0,KVM_TLB_SETS(r6)
1789 li r7,0x400 /* IS field = 0b01 */
1791 sldi r0,r3,32 /* RS has PID */
1792 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1798 #endif /* CONFIG_PPC_RADIX_MMU */
1801 * POWER7/POWER8 guest -> host partition switch code.
1802 * We don't have to lock against tlbies but we do
1803 * have to coordinate the hardware threads.
1804 * Here STACK_SLOT_TRAP(r1) contains the trap number.
1806 kvmhv_switch_to_host:
1807 /* Secondary threads wait for primary to do partition switch */
1808 ld r5,HSTATE_KVM_VCORE(r13)
1809 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1810 lbz r3,HSTATE_PTID(r13)
1814 13: lbz r3,VCORE_IN_GUEST(r5)
1820 /* Primary thread waits for all the secondaries to exit guest */
1821 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1822 rlwinm r0,r3,32-8,0xff
1828 /* Did we actually switch to the guest at all? */
1829 lbz r6, VCORE_IN_GUEST(r5)
1833 /* Primary thread switches back to host partition */
1834 lwz r7,KVM_HOST_LPID(r4)
1836 ld r6,KVM_HOST_SDR1(r4)
1837 li r8,LPID_RSVD /* switch to reserved LPID */
1840 mtspr SPRN_SDR1,r6 /* switch to host page table */
1841 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1846 /* DPDES and VTB are shared between threads */
1847 mfspr r7, SPRN_DPDES
1849 std r7, VCORE_DPDES(r5)
1850 std r8, VCORE_VTB(r5)
1851 /* clear DPDES so we don't get guest doorbells in the host */
1853 mtspr SPRN_DPDES, r8
1854 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1856 /* Subtract timebase offset from timebase */
1857 ld r8, VCORE_TB_OFFSET_APPL(r5)
1861 std r0, VCORE_TB_OFFSET_APPL(r5)
1862 mftb r6 /* current guest timebase */
1864 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1865 mftb r7 /* check if lower 24 bits overflowed */
1870 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1875 * If this is an HMI, we called kvmppc_realmode_hmi_handler
1876 * above, which may or may not have already called
1877 * kvmppc_subcore_exit_guest. Fortunately, all that
1878 * kvmppc_subcore_exit_guest does is clear a flag, so calling
1879 * it again here is benign even if kvmppc_realmode_hmi_handler
1880 * has already called it.
1882 bl kvmppc_subcore_exit_guest
1884 30: ld r5,HSTATE_KVM_VCORE(r13)
1885 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1888 ld r0, VCORE_PCR(r5)
1894 /* Signal secondary CPUs to continue */
1895 stb r0,VCORE_IN_GUEST(r5)
1896 19: lis r8,0x7fff /* MAX_INT@h */
1901 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
1902 ld r3, HSTATE_SPLIT_MODE(r13)
1905 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
1908 bl kvmhv_p9_restore_lpcr
1912 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1913 ld r8,KVM_HOST_LPCR(r4)
1917 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1918 /* Finish timing, if we have a vcpu */
1919 ld r4, HSTATE_KVM_VCPU(r13)
1923 bl kvmhv_accumulate_time
1926 /* Unset guest mode */
1927 li r0, KVM_GUEST_MODE_NONE
1928 stb r0, HSTATE_IN_GUEST(r13)
1930 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
1931 ld r0, SFS+PPC_LR_STKOFF(r1)
1936 kvmppc_guest_external:
1937 /* External interrupt, first check for host_ipi. If this is
1938 * set, we know the host wants us out so let's do it now
1943 * Restore the active volatile registers after returning from
1946 ld r9, HSTATE_KVM_VCPU(r13)
1947 li r12, BOOK3S_INTERRUPT_EXTERNAL
1950 * kvmppc_read_intr return codes:
1952 * Exit to host (r3 > 0)
1953 * 1 An interrupt is pending that needs to be handled by the host
1954 * Exit guest and return to host by branching to guest_exit_cont
1956 * 2 Passthrough that needs completion in the host
1957 * Exit guest and return to host by branching to guest_exit_cont
1958 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1959 * to indicate to the host to complete handling the interrupt
1961 * Before returning to guest, we check if any CPU is heading out
1962	 * to the host and if so, we head out also. If no CPUs are heading
1963	 * out, the return values <= 0 below apply and we re-enter the guest.
1965 * Return to guest (r3 <= 0)
1966 * 0 No external interrupt is pending
1967 * -1 A guest wakeup IPI (which has now been cleared)
1968 * In either case, we return to guest to deliver any pending
1971 * -2 A PCI passthrough external interrupt was handled
1972 * (interrupt was delivered directly to guest)
1973 * Return to guest to deliver any pending guest interrupts.
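/*
 * The dispatch on these return values, as a C sketch (control flow
 * only; labels refer to the code below):
 *
 *	ret = kvmppc_read_intr();
 *	if (ret > 0) {
 *		if (ret == 2)
 *			trap = BOOK3S_INTERRUPT_HV_RM_HARD;
 *		goto guest_exit_cont;		// exit to host
 *	}
 *	goto maybe_reenter_guest;		// ret <= 0
 */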
1979 /* Return code = 2 */
1980 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1981 stw r12, VCPU_TRAP(r9)
1984 1: /* Return code <= 1 */
1988 /* Return code <= 0 */
1989 maybe_reenter_guest:
1990 ld r5, HSTATE_KVM_VCORE(r13)
1991 lwz r0, VCORE_ENTRY_EXIT(r5)
1994 blt deliver_guest_interrupt
1997 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1999 * Softpatch interrupt for transactional memory emulation cases
2000 * on POWER9 DD2.2. This is early in the guest exit path - we
2001 * haven't saved registers or done a treclaim yet.
2004 /* Save instruction image in HEIR */
2006 stw r3, VCPU_HEIR(r9)
2009 * The cases we want to handle here are those where the guest
2010 * is in real suspend mode and is trying to transition to
2011 * transactional mode.
2013 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2014 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2016 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2017 cmpwi r3, 1 /* or if not in suspend state */
2020 /* Call C code to do the emulation */
2022 bl kvmhv_p9_tm_emulation_early
2024 ld r9, HSTATE_KVM_VCPU(r13)
2025 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2027 beq guest_exit_cont /* continue exiting if not handled */
2029 ld r11, VCPU_MSR(r9)
2030 b fast_interrupt_c_return /* go back to guest if handled */
2031 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2034 * Check whether an HDSI is an HPTE not found fault or something else.
2035 * If it is an HPTE not found fault that is due to the guest accessing
2036 * a page that they have mapped but which we have paged out, then
2037 * we continue on with the guest exit path. In all other cases,
2038 * reflect the HDSI to the guest as a DSI.
2042 lbz r0, KVM_RADIX(r3)
2044 mfspr r6, SPRN_HDSISR
2046 /* Look for DSISR canary. If we find it, retry instruction */
2049 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2051 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
2052 /* HPTE not found fault or protection fault? */
2053 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
2054 beq 1f /* if not, send it to the guest */
2055 andi. r0, r11, MSR_DR /* data relocation enabled? */
2058 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2060 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2062 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2063 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2064 bne 7f /* if no SLB entry found */
2065 4: std r4, VCPU_FAULT_DAR(r9)
2066 stw r6, VCPU_FAULT_DSISR(r9)
2068 /* Search the hash table. */
2069 mr r3, r9 /* vcpu pointer */
2070 li r7, 1 /* data fault */
2071 bl kvmppc_hpte_hv_fault
2072 ld r9, HSTATE_KVM_VCPU(r13)
2074 ld r11, VCPU_MSR(r9)
2075 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2076 cmpdi r3, 0 /* retry the instruction */
2078 cmpdi r3, -1 /* handle in kernel mode */
2080 cmpdi r3, -2 /* MMIO emulation; need instr word */
2083 /* Synthesize a DSI (or DSegI) for the guest */
2084 ld r4, VCPU_FAULT_DAR(r9)
2086 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
2087 mtspr SPRN_DSISR, r6
2088 7: mtspr SPRN_DAR, r4
2089 mtspr SPRN_SRR0, r10
2090 mtspr SPRN_SRR1, r11
2092 bl kvmppc_msr_interrupt
2093 fast_interrupt_c_return:
2094 6: ld r7, VCPU_CTR(r9)
2101 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2102 ld r5, KVM_VRMA_SLB_V(r5)
2105 /* If this is for emulated MMIO, load the instruction word */
2106 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2108 /* Set guest mode to 'jump over instruction' so if lwz faults
2109 * we'll just continue at the next IP. */
2110 li r0, KVM_GUEST_MODE_SKIP
2111 stb r0, HSTATE_IN_GUEST(r13)
2113 /* Do the access with MSR:DR enabled */
2115 ori r4, r3, MSR_DR /* Enable paging for data */
2120 /* Store the result */
2121 stw r8, VCPU_LAST_INST(r9)
2123 /* Unset guest mode. */
2124 li r0, KVM_GUEST_MODE_HOST_HV
2125 stb r0, HSTATE_IN_GUEST(r13)
2129 std r4, VCPU_FAULT_DAR(r9)
2130 stw r6, VCPU_FAULT_DSISR(r9)
2133 std r5, VCPU_FAULT_GPA(r9)
2137 * Similarly for an HISI, reflect it to the guest as an ISI unless
2138 * it is an HPTE not found fault for a page that we have paged out.
2142 lbz r0, KVM_RADIX(r3)
2144 bne .Lradix_hisi /* for radix, just save ASDR */
2145 andis. r0, r11, SRR1_ISI_NOPT@h
2147 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2150 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2152 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2154 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2155 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2156 bne 7f /* if no SLB entry found */
2158 /* Search the hash table. */
2159 mr r3, r9 /* vcpu pointer */
2162 li r7, 0 /* instruction fault */
2163 bl kvmppc_hpte_hv_fault
2164 ld r9, HSTATE_KVM_VCPU(r13)
2166 ld r11, VCPU_MSR(r9)
2167 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2168 cmpdi r3, 0 /* retry the instruction */
2169 beq fast_interrupt_c_return
2170 cmpdi r3, -1 /* handle in kernel mode */
2173 /* Synthesize an ISI (or ISegI) for the guest */
2175 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
2176 7: mtspr SPRN_SRR0, r10
2177 mtspr SPRN_SRR1, r11
2179 bl kvmppc_msr_interrupt
2180 b fast_interrupt_c_return
2182 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2183 ld r5, KVM_VRMA_SLB_V(r6)
2187 * Try to handle an hcall in real mode.
2188 * Returns to the guest if we handle it, or continues on up to
2189 * the kernel if we can't (i.e. if we don't have a handler for
2190 * it, or if the handler returns H_TOO_HARD).
2192 * r5 - r8 contain hcall args,
2193 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
2195 hcall_try_real_mode:
2196 ld r3,VCPU_GPR(R3)(r9)
2198 /* sc 1 from userspace - reflect to guest syscall */
2199 bne sc_1_fast_return
2200 /* sc 1 from nested guest - give it to L1 to handle */
2201 ld r0, VCPU_NESTED(r9)
2205 cmpldi r3,hcall_real_table_end - hcall_real_table
2207 /* See if this hcall is enabled for in-kernel handling */
2209 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2210 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2212 ld r0, KVM_ENABLED_HCALLS(r4)
2213 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
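/*
 * The bitmap index computation in C terms (hcall numbers are multiples
 * of 4, so hcall/4 indexes both the table and the bitmap):
 *
 *	word = hcall >> 8;		// (hcall / 4) / 64
 *	bit  = (hcall >> 2) & 0x3f;	// (hcall / 4) % 64
 *	if (!(kvm->arch.enabled_hcalls[word] & (1UL << bit)))
 *		goto hcall_real_fallback;	// punt to the host
 */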
2217 /* Get pointer to handler, if any, and call it */
2218 LOAD_REG_ADDR(r4, hcall_real_table)
2224 mr r3,r9 /* get vcpu pointer */
2225 ld r4,VCPU_GPR(R4)(r9)
2228 beq hcall_real_fallback
2229 ld r4,HSTATE_KVM_VCPU(r13)
2230 std r3,VCPU_GPR(R3)(r4)
2238 li r10, BOOK3S_INTERRUPT_SYSCALL
2239 bl kvmppc_msr_interrupt
2243	 /* We've attempted a real-mode hcall, but it has been punted back
2244	 * to userspace. We need to restore some clobbered volatile registers
2245	 * before resuming the pass-it-to-qemu path */
2246 hcall_real_fallback:
2247 li r12,BOOK3S_INTERRUPT_SYSCALL
2248 ld r9, HSTATE_KVM_VCPU(r13)
2252 .globl hcall_real_table
2254 .long 0 /* 0 - unused */
2255 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2256 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2257 .long DOTSYM(kvmppc_h_read) - hcall_real_table
2258 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2259 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
2260 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2261 #ifdef CONFIG_SPAPR_TCE_IOMMU
2262 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
2263 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
2268 .long 0 /* 0x24 - H_SET_SPRG0 */
2269 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
2270 .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
2284 #ifdef CONFIG_KVM_XICS
2285 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2286 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2287 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
2288 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
2289 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
2291 .long 0 /* 0x64 - H_EOI */
2292 .long 0 /* 0x68 - H_CPPR */
2293 .long 0 /* 0x6c - H_IPI */
2294 .long 0 /* 0x70 - H_IPOLL */
2295 .long 0 /* 0x74 - H_XIRR */
2323 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
2324 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
2340 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
2344 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2345 #ifdef CONFIG_SPAPR_TCE_IOMMU
2346 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
2347 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2463 #ifdef CONFIG_KVM_XICS
2464 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2466 .long 0 /* 0x2fc - H_XIRR_X*/
2468 .long DOTSYM(kvmppc_h_random) - hcall_real_table
2469 .globl hcall_real_table_end
2470 hcall_real_table_end:
2472 _GLOBAL(kvmppc_h_set_xdabr)
2473 EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
2474 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2476 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2479 6: li r3, H_PARAMETER
2482 _GLOBAL(kvmppc_h_set_dabr)
2483 EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
2484 li r5, DABRX_USER | DABRX_KERNEL
2488 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2489 std r4,VCPU_DABR(r3)
2490 stw r5, VCPU_DABRX(r3)
2491 mtspr SPRN_DABRX, r5
2492 /* Work around P7 bug where DABR can get corrupted on mtspr */
2493 1: mtspr SPRN_DABR,r4
2502 LOAD_REG_ADDR(r11, dawr_force_enable)
2509 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2510 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2511 rlwimi r5, r4, 2, DAWRX_WT
2513 std r4, VCPU_DAWR(r3)
2514 std r5, VCPU_DAWRX(r3)
2516	 * If we came in through the real-mode hcall handler then it is
2517	 * necessary to write the registers, since the return path won't.
2518	 * Otherwise it is sufficient to store them in the vcpu struct, as
2519	 * they will be loaded the next time the vcpu is run.
2522 andi. r6, r6, MSR_DR /* in real mode? */
2525 mtspr SPRN_DAWRX, r5
2529 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2531 std r11,VCPU_MSR(r3)
2533 stb r0,VCPU_CEDED(r3)
2534 sync /* order setting ceded vs. testing prodded */
2535 lbz r5,VCPU_PRODDED(r3)
2537 bne kvm_cede_prodded
2538 li r12,0 /* set trap to 0 to say hcall is handled */
2539 stw r12,VCPU_TRAP(r3)
2541 std r0,VCPU_GPR(R3)(r3)
2544 * Set our bit in the bitmask of napping threads unless all the
2545 * other threads are already napping, in which case we send this
2548 ld r5,HSTATE_KVM_VCORE(r13)
2549 lbz r6,HSTATE_PTID(r13)
2550 lwz r8,VCORE_ENTRY_EXIT(r5)
2554 addi r6,r5,VCORE_NAPPING_THREADS
2561 /* order napping_threads update vs testing entry_exit_map */
2564 stb r0,HSTATE_NAPPING(r13)
2565 lwz r7,VCORE_ENTRY_EXIT(r5)
2567 bge 33f /* another thread already exiting */
2570 * Although not specifically required by the architecture, POWER7
2571 * preserves the following registers in nap mode, even if an SMT mode
2572 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2573 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2575 /* Save non-volatile GPRs */
2576 std r14, VCPU_GPR(R14)(r3)
2577 std r15, VCPU_GPR(R15)(r3)
2578 std r16, VCPU_GPR(R16)(r3)
2579 std r17, VCPU_GPR(R17)(r3)
2580 std r18, VCPU_GPR(R18)(r3)
2581 std r19, VCPU_GPR(R19)(r3)
2582 std r20, VCPU_GPR(R20)(r3)
2583 std r21, VCPU_GPR(R21)(r3)
2584 std r22, VCPU_GPR(R22)(r3)
2585 std r23, VCPU_GPR(R23)(r3)
2586 std r24, VCPU_GPR(R24)(r3)
2587 std r25, VCPU_GPR(R25)(r3)
2588 std r26, VCPU_GPR(R26)(r3)
2589 std r27, VCPU_GPR(R27)(r3)
2590 std r28, VCPU_GPR(R28)(r3)
2591 std r29, VCPU_GPR(R29)(r3)
2592 std r30, VCPU_GPR(R30)(r3)
2593 std r31, VCPU_GPR(R31)(r3)
2598 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2600 * Branch around the call if both CPU_FTR_TM and
2601 * CPU_FTR_P9_TM_HV_ASSIST are off.
2605 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2607 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2609 ld r3, HSTATE_KVM_VCPU(r13)
2611 li r5, 0 /* don't preserve non-vol regs */
2612 bl kvmppc_save_tm_hv
2618 * Set DEC to the smaller of DEC and HDEC, so that we wake
2619 * no later than the end of our timeslice (HDEC interrupts
2620 * don't wake us from nap).
2626 /* On P9 check whether the guest has large decrementer mode enabled */
2627 ld r6, HSTATE_KVM_VCORE(r13)
2628 ld r6, VCORE_LPCR(r6)
2629 andis. r6, r6, LPCR_LD@h
2631 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2638 /* save expiry time of guest decrementer */
2640 ld r4, HSTATE_KVM_VCPU(r13)
2641 ld r5, HSTATE_KVM_VCORE(r13)
2642 ld r6, VCORE_TB_OFFSET_APPL(r5)
2643 subf r3, r6, r3 /* convert to host TB value */
2644 std r3, VCPU_DEC_EXPIRES(r4)
2646 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2647 ld r4, HSTATE_KVM_VCPU(r13)
2648 addi r3, r4, VCPU_TB_CEDE
2649 bl kvmhv_accumulate_time
2652 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2654 /* Go back to host stack */
2655 ld r1, HSTATE_HOST_R1(r13)
2658	 * Take a nap until a decrementer, external or doorbell interrupt
2659 * occurs, with PECE1 and PECE0 set in LPCR.
2660 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2661 * Also clear the runlatch bit before napping.
2664 mfspr r0, SPRN_CTRLF
2666 mtspr SPRN_CTRLT, r0
2669 stb r0,HSTATE_HWTHREAD_REQ(r13)
2671 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2673 ori r5, r5, LPCR_PECEDH
2674 rlwimi r5, r3, 0, LPCR_PECEDP
2675 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2677 kvm_nap_sequence: /* desired LPCR value in r5 */
2680 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2681 * enable state loss = 1 (allow SMT mode switch)
2682 * requested level = 0 (just stop dispatching)
2684 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2685 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2686 li r4, LPCR_PECE_HVEE@higher
2690 li r3, PNV_THREAD_NAP
2691 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
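/*
 * In C terms, the POWER9 nap request above boils down to (sketch):
 *
 *	psscr = PSSCR_EC | PSSCR_ESL;	// EC=1, ESL=1, RL=0
 *	lpcr |= LPCR_PECE_HVEE;		// also wake on HV external interrupts
 */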
2696 bl isa300_idle_stop_mayloss
2698 bl isa206_idle_insn_mayloss
2699 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
2701 mfspr r0, SPRN_CTRLF
2703 mtspr SPRN_CTRLT, r0
2708 stb r0, PACA_FTRACE_ENABLED(r13)
2710 li r0, KVM_HWTHREAD_IN_KVM
2711 stb r0, HSTATE_HWTHREAD_STATE(r13)
2713 lbz r0, HSTATE_NAPPING(r13)
2714 cmpwi r0, NAPPING_CEDE
2716 cmpwi r0, NAPPING_NOVCPU
2717 beq kvm_novcpu_wakeup
2718 cmpwi r0, NAPPING_UNSPLIT
2719 beq kvm_unsplit_wakeup
2720 twi 31,0,0 /* Nap state must not be zero */
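/*
 * Equivalent dispatch on the saved nap state, as a C sketch:
 *
 *	switch (napping) {
 *	case NAPPING_CEDE:	break;			// ceded-vcpu wakeup below
 *	case NAPPING_NOVCPU:	goto kvm_novcpu_wakeup;
 *	case NAPPING_UNSPLIT:	goto kvm_unsplit_wakeup;
 *	default:		BUG();			// nap state must not be zero
 *	}
 */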
2728 /* Woken by external or decrementer interrupt */
2730 /* get vcpu pointer */
2731 ld r4, HSTATE_KVM_VCPU(r13)
2733 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2734 addi r3, r4, VCPU_TB_RMINTR
2735 bl kvmhv_accumulate_time
2738 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2740 * Branch around the call if both CPU_FTR_TM and
2741 * CPU_FTR_P9_TM_HV_ASSIST are off.
2745 END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2747 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
2751 li r5, 0 /* don't preserve non-vol regs */
2752 bl kvmppc_restore_tm_hv
2754 ld r4, HSTATE_KVM_VCPU(r13)
2758 /* load up FP state */
2761 /* Restore guest decrementer */
2762 ld r3, VCPU_DEC_EXPIRES(r4)
2763 ld r5, HSTATE_KVM_VCORE(r13)
2764 ld r6, VCORE_TB_OFFSET_APPL(r5)
2765 add r3, r3, r6 /* convert host TB to guest TB value */
2771 ld r14, VCPU_GPR(R14)(r4)
2772 ld r15, VCPU_GPR(R15)(r4)
2773 ld r16, VCPU_GPR(R16)(r4)
2774 ld r17, VCPU_GPR(R17)(r4)
2775 ld r18, VCPU_GPR(R18)(r4)
2776 ld r19, VCPU_GPR(R19)(r4)
2777 ld r20, VCPU_GPR(R20)(r4)
2778 ld r21, VCPU_GPR(R21)(r4)
2779 ld r22, VCPU_GPR(R22)(r4)
2780 ld r23, VCPU_GPR(R23)(r4)
2781 ld r24, VCPU_GPR(R24)(r4)
2782 ld r25, VCPU_GPR(R25)(r4)
2783 ld r26, VCPU_GPR(R26)(r4)
2784 ld r27, VCPU_GPR(R27)(r4)
2785 ld r28, VCPU_GPR(R28)(r4)
2786 ld r29, VCPU_GPR(R29)(r4)
2787 ld r30, VCPU_GPR(R30)(r4)
2788 ld r31, VCPU_GPR(R31)(r4)
2790 /* Check the wake reason in SRR1 to see why we got here */
2791 bl kvmppc_check_wake_reason
2794 * Restore volatile registers since we could have called a
2795 * C routine in kvmppc_check_wake_reason
2797 * r3 tells us whether we need to return to host or not
2798	 * WARNING: it gets checked further down;
2799	 * do not modify r3 until this check is done.
2801 ld r4, HSTATE_KVM_VCPU(r13)
2803 /* clear our bit in vcore->napping_threads */
2804 34: ld r5,HSTATE_KVM_VCORE(r13)
2805 lbz r7,HSTATE_PTID(r13)
2808 addi r6,r5,VCORE_NAPPING_THREADS
2814 stb r0,HSTATE_NAPPING(r13)
2816 /* See if the wake reason saved in r3 means we need to exit */
2817 stw r12, VCPU_TRAP(r4)
2821 b maybe_reenter_guest
2823 /* cede when already previously prodded case */
2826 stb r0,VCPU_PRODDED(r3)
2827 sync /* order testing prodded vs. clearing ceded */
2828 stb r0,VCPU_CEDED(r3)
2832 /* we've ceded but we want to give control to the host */
2834 ld r9, HSTATE_KVM_VCPU(r13)
2835 #ifdef CONFIG_KVM_XICS
2836 /* are we using XIVE with single escalation? */
2837 ld r10, VCPU_XIVE_ESC_VADDR(r9)
2840 li r6, XIVE_ESB_SET_PQ_00
2842	 * If we still have a pending escalation, abort the cede
2843	 * and set PQ to 10 rather than 00, so that we don't
2844 * potentially end up with two entries for the escalation
2845 * interrupt in the XIVE interrupt queue. In that case
2846 * we also don't want to set xive_esc_on to 1 here in
2847 * case we race with xive_esc_irq().
2849 lbz r5, VCPU_XIVE_ESC_ON(r9)
2853 stb r0, VCPU_CEDED(r9)
2854 li r6, XIVE_ESB_SET_PQ_10
2857 stb r0, VCPU_XIVE_ESC_ON(r9)
2858 /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
2860 5: /* Enable XIVE escalation */
2862 andi. r0, r0, MSR_DR /* in real mode? */
2866 1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
2869 #endif /* CONFIG_KVM_XICS */
2870 3: b guest_exit_cont
2872 /* Try to do machine check recovery in real mode */
2873 machine_check_realmode:
2874 mr r3, r9 /* get vcpu pointer */
2875 bl kvmppc_realmode_machine_check
2877 /* all machine checks go to virtual mode for further handling */
2878 ld r9, HSTATE_KVM_VCPU(r13)
2879 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2883 * Call C code to handle a HMI in real mode.
2884 * Only the primary thread does the call, secondary threads are handled
2885 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
2886 * r9 points to the vcpu on entry
2889 lbz r0, HSTATE_PTID(r13)
2892 bl kvmppc_realmode_hmi_handler
2893 ld r9, HSTATE_KVM_VCPU(r13)
2894 li r12, BOOK3S_INTERRUPT_HMI
2898 * Check the reason we woke from nap, and take appropriate action.
2900 * 0 if nothing needs to be done
2901 * 1 if something happened that needs to be handled by the host
2902 * -1 if there was a guest wakeup (IPI or msgsnd)
2903 * -2 if we handled a PCI passthrough interrupt (returned by
2904 * kvmppc_read_intr only)
2906 * Also sets r12 to the interrupt vector for any interrupt that needs
2907 * to be handled now by the host (0x500 for external interrupt), or zero.
2908 * Modifies all volatile registers (since it may call a C function).
2909 * This routine calls kvmppc_read_intr, a C function, if an external
2910 * interrupt is pending.
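/*
 * Decode of the SRR1 wake-reason field below, as a C sketch (the
 * shift/mask mirror the rlwinm; POWER8 field values shown):
 *
 *	switch ((srr1 >> 18) & 0xf) {
 *	case 0x8: return kvmppc_read_intr();	// external: see what it was
 *	case 0x6:				// decrementer
 *	case 0x5: return 0;			// privileged doorbell
 *	case 0x3: ...				// HV doorbell: check host IPI
 *	case 0xa: r12 = BOOK3S_INTERRUPT_HMI; return 1;
 *	default:  return 1;
 *	}
 */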
2912 kvmppc_check_wake_reason:
2915 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2917 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2918 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2919 cmpwi r6, 8 /* was it an external interrupt? */
2920 beq 7f /* if so, see what it was */
2923 cmpwi r6, 6 /* was it the decrementer? */
2926 cmpwi r6, 5 /* privileged doorbell? */
2928 cmpwi r6, 3 /* hypervisor doorbell? */
2930 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2931	 cmpwi r6, 0xa /* Hypervisor maintenance? */
2933 li r3, 1 /* anything else, return 1 */
2936 /* hypervisor doorbell */
2937 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2940 * Clear the doorbell as we will invoke the handler
2941 * explicitly in the guest exit path.
2943 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2945 /* see if it's a host IPI */
2950 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2951 lbz r0, HSTATE_HOST_IPI(r13)
2954 /* if not, return -1 */
2958 /* Woken up due to Hypervisor maintenance interrupt */
2959 4: li r12, BOOK3S_INTERRUPT_HMI
2963 /* external interrupt - create a stack frame so we can call C */
2965 std r0, PPC_LR_STKOFF(r1)
2966 stdu r1, -PPC_MIN_STKFRM(r1)
2969 li r12, BOOK3S_INTERRUPT_EXTERNAL
2974 * Return code of 2 means PCI passthrough interrupt, but
2975 * we need to return back to host to complete handling the
2976 * interrupt. Trap reason is expected in r12 by guest
2979 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2981 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2982 addi r1, r1, PPC_MIN_STKFRM
2987 * Save away FP, VMX and VSX registers.
2989 * N.B. r30 and r31 are volatile across this function,
2990 * thus it is not callable from C.
2997 #ifdef CONFIG_ALTIVEC
2999 oris r8,r8,MSR_VEC@h
3000 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3004 oris r8,r8,MSR_VSX@h
3005 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3008 addi r3,r3,VCPU_FPRS
3010 #ifdef CONFIG_ALTIVEC
3012 addi r3,r31,VCPU_VRS
3014 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3016 mfspr r6,SPRN_VRSAVE
3017 stw r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * any registers.
	 */

BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
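/*
 * Reminder on the MSR[TS] encoding tested by the rldicl. sequences in
 * these routines: TS is a two-bit field, so in C terms (a sketch):
 *
 *	unsigned int ts = (msr >> MSR_TS_S_LG) & 3;
 *	// ts == 0: TM inactive, ts == 1: suspended, ts == 2: transactional
 */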
/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
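	/*
	 * i.e. r1 = emergency_sp - THREAD_SIZE/2 - INT_FRAME_SIZE, so if
	 * this trap was taken while already running on the emergency
	 * stack, roughly half of that stack still remains for this frame.
	 */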
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_2GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)
	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b
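/*
 * Note on the sequence above: bcl 20,31,.+4 is the usual "branch and
 * link to the next instruction" idiom for reading the current address;
 * rldimi with r4 = -1 then forces the top two address bits on, so that
 * label 9f is a kernel linear-map (0xc...) address by the time
 * RFI_TO_KERNEL switches the MMU back on.
 */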
/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
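/*
 * Equivalent logic in C (a sketch; "msr" is the r11 in/out value):
 *
 *	unsigned long ts = (msr >> MSR_TS_S_LG) & 3;
 *	msr = vcpu->arch.intr_msr;
 *	if (ts == 2)		// transactional at interrupt time...
 *		ts = 1;		// ...is delivered in suspended state
 *	// insert ts into the TS field of the new MSR (the rldimi above)
 */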
/*
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
_GLOBAL(kvmhv_load_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr
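/*
 * Effective predicate of the nested feature sections used above, in C
 * terms (a sketch):
 *
 *	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
 *		// MMCR2/SIER exist on POWER8 and later
 *		if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
 *			// SPMC1/SPMC2/MMCRS exist on POWER8 but not POWER9
 *		}
 *	}
 */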
/*
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
_GLOBAL(kvmhv_load_host_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr
/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
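	/*
	 * The workaround sequence below, roughly in C (a sketch):
	 *
	 *	mmcr2_saved = mfspr(SPRN_MMCR2);
	 *	mtspr(SPRN_MMCR2, ~0x3ffUL);	 // all 54 freeze bits set
	 *	isync();
	 *	mmcr0_saved = mfspr(SPRN_MMCR0); // safe: counters frozen
	 *	mtspr(SPRN_MMCR0, MMCR0_FC);	 // now freeze via MMCR0 too
	 */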
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
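/*
 * i.e. (a sketch of the intent): with MMCR0[PMXE] (exception enable)
 * set, preloading PMC6 with 0x7fffffff means the next counted event
 * overflows it, which raises the performance monitor interrupt that
 * writing MMCR0[PMAO] directly fails to deliver on POWER8E.
 */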
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
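/*
 * The min/max/total update below uses a seqcount-style write side so
 * that a reader can detect a torn update.  In C terms (a sketch):
 *
 *	tas->seqcount++;		// odd: update in progress
 *	smp_wmb();			// the lwsync below
 *	tas->total += delta;
 *	if (first || delta < tas->min)
 *		tas->min = delta;
 *	if (delta > tas->max)
 *		tas->max = delta;
 *	smp_wmb();
 *	tas->seqcount++;		// even again: update complete
 */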
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0		/* was anything being timed? */
	beqlr
	subf	r3, r6, r7	/* elapsed time for the old activity */
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)