asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: PPC: Book3S: Replace reset_msr mmu op with inject_interrupt arch op
author: Nicholas Piggin <npiggin@gmail.com>
Wed, 2 Oct 2019 06:00:22 +0000 (16:00 +1000)
committer: Paul Mackerras <paulus@ozlabs.org>
Tue, 22 Oct 2019 05:29:02 +0000 (16:29 +1100)
reset_msr sets the MSR for interrupt injection, but it's cleaner and
more flexible to provide a single op to set both MSR and PC for the
interrupt.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_32_mmu.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c

index 6fe6ad64cba57649f77d2cd3d0443097ee857e35..4273e799203d2d7ce565fac7bfec3a398e62b341 100644 (file)
@@ -401,7 +401,6 @@ struct kvmppc_mmu {
        u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
        int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
                      struct kvmppc_pte *pte, bool data, bool iswrite);
-       void (*reset_msr)(struct kvm_vcpu *vcpu);
        void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
        int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
        u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
index ee62776e5433cc72316bf323f43d3c5df99d97f2..d63f649fe713d2d24776e209e3c4f697c8899bb6 100644 (file)
@@ -271,6 +271,7 @@ struct kvmppc_ops {
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
+       void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
index a2336c452905f46099652b9431adb53af1820bce..58a59ee998e292148f2318b94b16a57d8b706378 100644 (file)
@@ -74,27 +74,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
 };
 
-void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
-               ulong pc = kvmppc_get_pc(vcpu);
-               ulong lr = kvmppc_get_lr(vcpu);
-               if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
-                       kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
-               if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
-                       kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
-               vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
-       }
-}
-EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-       if (!is_kvmppc_hv_enabled(vcpu->kvm))
-               return to_book3s(vcpu)->hior;
-       return 0;
-}
-
 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
 {
@@ -134,11 +113,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-       kvmppc_unfixup_split_real(vcpu);
-       kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-       kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & SRR1_MSR_BITS) | flags);
-       kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
-       vcpu->arch.mmu.reset_msr(vcpu);
+       vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
 }
 
 static int kvmppc_book3s_vec2irqprio(unsigned int vec)
index 18f244aad7aaa46c1eb45db0f6a283c3d11d9be0..f21e73492ce3d9f9ec26f572547847479b1b74ea 100644 (file)
@@ -90,11 +90,6 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
        return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
 }
 
-static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
-{
-       kvmppc_set_msr(vcpu, 0);
-}
-
 static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
                                      u32 sre, gva_t eaddr,
                                      bool primary)
@@ -406,7 +401,6 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
        mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
        mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
        mmu->xlate = kvmppc_mmu_book3s_32_xlate;
-       mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr;
        mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
index 5f63a5f7f24f811e124c9283fb67b25504214e5d..599133256a9541f33b3dd210699c02c21bcaa682 100644 (file)
 #define dprintk(X...) do { } while(0)
 #endif
 
-static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
-{
-       unsigned long msr = vcpu->arch.intr_msr;
-       unsigned long cur_msr = kvmppc_get_msr(vcpu);
-
-       /* If transactional, change to suspend mode on IRQ delivery */
-       if (MSR_TM_TRANSACTIONAL(cur_msr))
-               msr |= MSR_TS_S;
-       else
-               msr |= cur_msr & MSR_TS_MASK;
-
-       kvmppc_set_msr(vcpu, msr);
-}
-
 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                                struct kvm_vcpu *vcpu,
                                gva_t eaddr)
@@ -676,7 +662,6 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
-       mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
index f2b9aea43216e37e2ff1b936566a2d61ffb8c294..4c37e97c75a1de14463d25348781416c0515fb81 100644 (file)
@@ -275,18 +275,6 @@ int kvmppc_mmu_hv_init(void)
        return 0;
 }
 
-static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
-{
-       unsigned long msr = vcpu->arch.intr_msr;
-
-       /* If transactional, change to suspend mode on IRQ delivery */
-       if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
-               msr |= MSR_TS_S;
-       else
-               msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
-       kvmppc_set_msr(vcpu, msr);
-}
-
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
                                long pte_index, unsigned long pteh,
                                unsigned long ptel, unsigned long *pte_idx_ret)
@@ -2162,7 +2150,6 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
        vcpu->arch.slb_nr = 32;         /* POWER7/POWER8 */
 
        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
-       mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
 
        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
 }
index 709cf1fd4cf466773da3a0021cf03777e4a58ef6..94a0a9911b275e75d57814314ce0a933752a37c2 100644 (file)
@@ -338,6 +338,27 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
        spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
+static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+       unsigned long msr, pc, new_msr, new_pc;
+
+       msr = kvmppc_get_msr(vcpu);
+       pc = kvmppc_get_pc(vcpu);
+       new_msr = vcpu->arch.intr_msr;
+       new_pc = vec;
+
+       /* If transactional, change to suspend mode on IRQ delivery */
+       if (MSR_TM_TRANSACTIONAL(msr))
+               new_msr |= MSR_TS_S;
+       else
+               new_msr |= msr & MSR_TS_MASK;
+
+       kvmppc_set_srr0(vcpu, pc);
+       kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+       kvmppc_set_pc(vcpu, new_pc);
+       kvmppc_set_msr(vcpu, new_msr);
+}
+
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
        /*
@@ -5401,6 +5422,7 @@ static struct kvmppc_ops kvm_ops_hv = {
        .set_one_reg = kvmppc_set_one_reg_hv,
        .vcpu_load   = kvmppc_core_vcpu_load_hv,
        .vcpu_put    = kvmppc_core_vcpu_put_hv,
+       .inject_interrupt = kvmppc_inject_interrupt_hv,
        .set_msr     = kvmppc_set_msr_hv,
        .vcpu_run    = kvmppc_vcpu_run_hv,
        .vcpu_create = kvmppc_core_vcpu_create_hv,
index cc65af8fe6f7e7d8d0c835ec10e660a2a7646f61..ce4fcf76e53e919d622296f8bf4d0369e50cc701 100644 (file)
@@ -90,7 +90,43 @@ static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
 }
 
-void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);
+static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
+               ulong pc = kvmppc_get_pc(vcpu);
+               ulong lr = kvmppc_get_lr(vcpu);
+               if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+                       kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+               if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+                       kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
+               vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
+       }
+}
+
+static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+       unsigned long msr, pc, new_msr, new_pc;
+
+       kvmppc_unfixup_split_real(vcpu);
+
+       msr = kvmppc_get_msr(vcpu);
+       pc = kvmppc_get_pc(vcpu);
+       new_msr = vcpu->arch.intr_msr;
+       new_pc = to_book3s(vcpu)->hior + vec;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+       /* If transactional, change to suspend mode on IRQ delivery */
+       if (MSR_TM_TRANSACTIONAL(msr))
+               new_msr |= MSR_TS_S;
+       else
+               new_msr |= msr & MSR_TS_MASK;
+#endif
+
+       kvmppc_set_srr0(vcpu, pc);
+       kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+       kvmppc_set_pc(vcpu, new_pc);
+       kvmppc_set_msr(vcpu, new_msr);
+}
 
 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -1761,6 +1797,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 #else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
+       vcpu->arch.intr_msr = 0;
 #endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;
@@ -2058,6 +2095,7 @@ static struct kvmppc_ops kvm_ops_pr = {
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load   = kvmppc_core_vcpu_load_pr,
        .vcpu_put    = kvmppc_core_vcpu_put_pr,
+       .inject_interrupt = kvmppc_inject_interrupt_pr,
        .set_msr     = kvmppc_set_msr_pr,
        .vcpu_run    = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,