Merge tag 'powerpc-4.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9cb9448163c4bf7021822d6632fb6c94452187ed..81e2ea882d977bd338445565f2a78257e274abf1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -49,6 +49,7 @@
 #include <asm/reg.h>
 #include <asm/ppc-opcode.h>
 #include <asm/asm-prototypes.h>
+#include <asm/debug.h>
 #include <asm/disassemble.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
@@ -170,7 +171,7 @@ static bool kvmppc_ipi_thread(int cpu)
 
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (cpu >= 0 && cpu < nr_cpu_ids) {
-               if (paca[cpu].kvm_hstate.xics_phys) {
+               if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
                        xics_wake_cpu(cpu);
                        return true;
                }
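
[Annotation] The hunk above is the first of many in this diff that switch from indexing the old flat paca array to dereferencing the new paca_ptrs array of pointers introduced by the PACA allocation rework in this merge. A minimal userspace sketch of the access-pattern change, using a made-up structure and field in place of the real PACA layout:

    #include <stdlib.h>

    /* Stand-in for the per-CPU PACA; the single field is illustrative only. */
    struct demo_paca {
            unsigned long xics_phys;
    };

    #define DEMO_NR_CPUS 4

    /*
     * Old scheme: one statically sized array of structures, indexed by CPU.
     * New scheme: an array of pointers, so each entry can be allocated
     * individually rather than as one large static array.
     */
    static struct demo_paca *demo_paca_ptrs[DEMO_NR_CPUS];

    static unsigned long read_xics_phys(int cpu)
    {
            /* Was paca[cpu].kvm_hstate.xics_phys; now one extra dereference. */
            return demo_paca_ptrs[cpu]->xics_phys;
    }

    int main(void)
    {
            for (int i = 0; i < DEMO_NR_CPUS; i++) {
                    demo_paca_ptrs[i] = calloc(1, sizeof(*demo_paca_ptrs[i]));
                    if (!demo_paca_ptrs[i])
                            return 1;
            }
            return (int)read_xics_phys(0); /* 0: calloc zeroed the field */
    }
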
@@ -498,7 +499,8 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
                 * use 640 bytes of the structure though, so we should accept
                 * clients that set a size of 640.
                 */
-               if (len < 640)
+               BUILD_BUG_ON(sizeof(struct lppaca) != 640);
+               if (len < sizeof(struct lppaca))
                        break;
                vpap = &tvcpu->arch.vpa;
                err = 0;
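
[Annotation] The comment above the changed check explains that clients may register a VPA of exactly 640 bytes, which is the size of struct lppaca. The new code expresses the limit as sizeof() and pins that assumption with BUILD_BUG_ON, so a future change to the structure breaks the build rather than silently changing the check. A standalone sketch of the same pattern using C11's _Static_assert, with a placeholder structure:

    #include <stddef.h>

    /* Placeholder for struct lppaca; the real definition (padded to
     * 640 bytes) lives in arch/powerpc/include/asm/lppaca.h. */
    struct demo_vpa {
            unsigned char data[640];
    };

    /*
     * Equivalent of the kernel's BUILD_BUG_ON(): if the structure ever
     * grows or shrinks, the length check below would quietly change
     * meaning, so make that a compile-time failure instead.
     */
    _Static_assert(sizeof(struct demo_vpa) == 640, "VPA must stay 640 bytes");

    /* Accept any registration at least as large as the structure itself. */
    static int demo_vpa_len_ok(size_t len)
    {
            return len >= sizeof(struct demo_vpa);
    }

    int main(void)
    {
            return demo_vpa_len_ok(640) ? 0 : 1;
    }
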
@@ -741,6 +743,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
        case H_SET_MODE_RESOURCE_SET_DAWR:
                if (!kvmppc_power8_compatible(vcpu))
                        return H_P2;
+               if (!ppc_breakpoint_available())
+                       return H_P2;
                if (mflags)
                        return H_UNSUPPORTED_FLAG_START;
                if (value2 & DABRX_HYP)
@@ -1206,6 +1210,19 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        r = RESUME_GUEST;
                }
                break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+               /*
+                * This occurs for various TM-related instructions that
+                * we need to emulate on POWER9 DD2.2.  We have already
+                * handled the cases where the guest was in real-suspend
+                * mode and was transitioning to transactional state.
+                */
+               r = kvmhv_p9_tm_emulation(vcpu);
+               break;
+#endif
+
        case BOOK3S_INTERRUPT_HV_RM_HARD:
                r = RESUME_PASSTHROUGH;
                break;
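
[Annotation] The new case handles the hypervisor "softpatch" interrupt that POWER9 DD2.2 raises for certain transactional-memory instructions, deferring to kvmhv_p9_tm_emulation() for anything not already handled in real mode. A hedged sketch of how such a case slots into an exit-reason dispatcher; the enum values and helpers below are placeholders, not the kernel's identifiers:

    #define DEMO_PPC_TRANSACTIONAL_MEM 1 /* mirrors CONFIG_PPC_TRANSACTIONAL_MEM=y */

    enum demo_exit_reason {
            DEMO_EXIT_HV_SOFTPATCH, /* stands in for BOOK3S_INTERRUPT_HV_SOFTPATCH */
            DEMO_EXIT_HV_RM_HARD,   /* stands in for BOOK3S_INTERRUPT_HV_RM_HARD */
            DEMO_EXIT_OTHER,
    };

    enum demo_resume {
            DEMO_RESUME_GUEST,       /* re-enter the guest straight away */
            DEMO_RESUME_PASSTHROUGH, /* hand the interrupt to the host path */
            DEMO_RESUME_HOST,        /* bail out to the host / userspace */
    };

    /* Placeholder for kvmhv_p9_tm_emulation(): emulate the trapped TM
     * instruction and report whether the guest can simply be resumed. */
    static enum demo_resume demo_tm_emulate(void)
    {
            return DEMO_RESUME_GUEST;
    }

    static enum demo_resume demo_handle_exit(enum demo_exit_reason reason)
    {
            switch (reason) {
    #ifdef DEMO_PPC_TRANSACTIONAL_MEM
            case DEMO_EXIT_HV_SOFTPATCH:
                    return demo_tm_emulate();
    #endif
            case DEMO_EXIT_HV_RM_HARD:
                    return DEMO_RESUME_PASSTHROUGH;
            default:
                    return DEMO_RESUME_HOST;
            }
    }

    int main(void)
    {
            return demo_handle_exit(DEMO_EXIT_HV_SOFTPATCH) == DEMO_RESUME_GUEST ? 0 : 1;
    }
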
@@ -1978,7 +1995,9 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
         * turn off the HFSCR bit, which causes those instructions to trap.
         */
        vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
-       if (!cpu_has_feature(CPU_FTR_TM))
+       if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+               vcpu->arch.hfscr |= HFSCR_TM;
+       else if (!cpu_has_feature(CPU_FTR_TM_COMP))
                vcpu->arch.hfscr &= ~HFSCR_TM;
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                vcpu->arch.hfscr &= ~HFSCR_MSGP;
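
[Annotation] The comment above this hunk notes that clearing a bit in HFSCR makes the corresponding facility's instructions trap to the hypervisor. With the change, hosts offering TM hypervisor assist (POWER9 DD2.2) keep HFSCR_TM set so guest TM instructions reach the softpatch emulation path, while hosts with no usable TM clear it. A small sketch of that decision, with a made-up mask value rather than the real HFSCR bit assignment:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative mask only; the real HFSCR_TM bit is defined in asm/reg.h. */
    #define DEMO_HFSCR_TM (1ULL << 5)

    /*
     * Mirrors the logic in the hunk above:
     *  - TM hypervisor assist available: force the bit on so the guest's
     *    TM instructions can be trapped and emulated.
     *  - no TM support at all: clear the bit so those instructions raise
     *    a facility-unavailable interrupt instead.
     *  - otherwise: leave the hardware's reset value alone.
     */
    static uint64_t demo_adjust_hfscr(uint64_t hfscr, bool tm_hv_assist, bool tm_comp)
    {
            if (tm_hv_assist)
                    hfscr |= DEMO_HFSCR_TM;
            else if (!tm_comp)
                    hfscr &= ~DEMO_HFSCR_TM;
            return hfscr;
    }

    int main(void)
    {
            return demo_adjust_hfscr(0, true, false) == DEMO_HFSCR_TM ? 0 : 1;
    }
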
@@ -2140,7 +2159,7 @@ static int kvmppc_grab_hwthread(int cpu)
        struct paca_struct *tpaca;
        long timeout = 10000;
 
-       tpaca = &paca[cpu];
+       tpaca = paca_ptrs[cpu];
 
        /* Ensure the thread won't go into the kernel if it wakes */
        tpaca->kvm_hstate.kvm_vcpu = NULL;
@@ -2173,7 +2192,7 @@ static void kvmppc_release_hwthread(int cpu)
 {
        struct paca_struct *tpaca;
 
-       tpaca = &paca[cpu];
+       tpaca = paca_ptrs[cpu];
        tpaca->kvm_hstate.hwthread_req = 0;
        tpaca->kvm_hstate.kvm_vcpu = NULL;
        tpaca->kvm_hstate.kvm_vcore = NULL;
@@ -2239,9 +2258,10 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
                vcpu->arch.thread_cpu = cpu;
                cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
        }
-       tpaca = &paca[cpu];
+       tpaca = paca_ptrs[cpu];
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
        tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
+       tpaca->kvm_hstate.fake_suspend = 0;
        /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
        smp_wmb();
        tpaca->kvm_hstate.kvm_vcore = vc;
@@ -2264,7 +2284,7 @@ static void kvmppc_wait_for_nap(int n_threads)
                 * for any threads that still have a non-NULL vcore ptr.
                 */
                for (i = 1; i < n_threads; ++i)
-                       if (paca[cpu + i].kvm_hstate.kvm_vcore)
+                       if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
                                break;
                if (i == n_threads) {
                        HMT_medium();
@@ -2274,7 +2294,7 @@ static void kvmppc_wait_for_nap(int n_threads)
        }
        HMT_medium();
        for (i = 1; i < n_threads; ++i)
-               if (paca[cpu + i].kvm_hstate.kvm_vcore)
+               if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
                        pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
 }
 
@@ -2806,9 +2826,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        }
 
        for (thr = 0; thr < controlled_threads; ++thr) {
-               paca[pcpu + thr].kvm_hstate.tid = thr;
-               paca[pcpu + thr].kvm_hstate.napping = 0;
-               paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
+               struct paca_struct *paca = paca_ptrs[pcpu + thr];
+
+               paca->kvm_hstate.tid = thr;
+               paca->kvm_hstate.napping = 0;
+               paca->kvm_hstate.kvm_split_mode = sip;
        }
 
        /* Initiate micro-threading (split-core) on POWER8 if required */
@@ -2923,7 +2945,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        } else if (hpt_on_radix) {
                /* Wait for all threads to have seen final sync */
                for (thr = 1; thr < controlled_threads; ++thr) {
-                       while (paca[pcpu + thr].kvm_hstate.kvm_split_mode) {
+                       struct paca_struct *paca = paca_ptrs[pcpu + thr];
+
+                       while (paca->kvm_hstate.kvm_split_mode) {
                                HMT_low();
                                barrier();
                        }
@@ -4388,7 +4412,7 @@ static int kvm_init_subcore_bitmap(void)
                int node = cpu_to_node(first_cpu);
 
                /* Ignore if it is already allocated. */
-               if (paca[first_cpu].sibling_subcore_state)
+               if (paca_ptrs[first_cpu]->sibling_subcore_state)
                        continue;
 
                sibling_subcore_state =
@@ -4403,7 +4427,8 @@ static int kvm_init_subcore_bitmap(void)
                for (j = 0; j < threads_per_core; j++) {
                        int cpu = first_cpu + j;
 
-                       paca[cpu].sibling_subcore_state = sibling_subcore_state;
+                       paca_ptrs[cpu]->sibling_subcore_state =
+                                               sibling_subcore_state;
                }
        }
        return 0;
@@ -4430,7 +4455,7 @@ static int kvmppc_book3s_init_hv(void)
 
        /*
         * We need a way of accessing the XICS interrupt controller,
-        * either directly, via paca[cpu].kvm_hstate.xics_phys, or
+        * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or
         * indirectly, via OPAL.
         */
 #ifdef CONFIG_SMP
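
[Annotation] The comment in this last hunk describes the two ways the module can reach the XICS interrupt controller: directly, when a physical address has been recorded in the PACA, or indirectly through OPAL firmware calls. A hedged sketch of that fallback pattern, matching the xics_phys check seen in kvmppc_ipi_thread() above; all names below are placeholders:

    #include <stdbool.h>

    /* Placeholder per-CPU state; xics_phys plays the role of
     * kvm_hstate.xics_phys from the hunks above. */
    struct demo_cpu_state {
            unsigned long xics_phys;
    };

    /* Stand-ins for the MMIO wakeup path and the OPAL IPI call. */
    static void demo_mmio_ipi(unsigned long xics_phys) { (void)xics_phys; }
    static int demo_firmware_ipi(int cpu) { (void)cpu; return 0; }

    /*
     * Prefer the direct MMIO path when the interrupt controller's
     * physical address is known; otherwise fall back to asking firmware
     * to deliver the IPI on our behalf.
     */
    static bool demo_send_ipi(struct demo_cpu_state *st, int cpu)
    {
            if (st->xics_phys) {
                    demo_mmio_ipi(st->xics_phys);
                    return true;
            }
            return demo_firmware_ipi(cpu) == 0;
    }

    int main(void)
    {
            struct demo_cpu_state st = { .xics_phys = 0 };
            return demo_send_ipi(&st, 0) ? 0 : 1;
    }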