From: Paul Mackerras
Date: Thu, 9 Nov 2017 03:30:24 +0000 (+1100)
Subject: Merge branch 'kvm-ppc-fixes' into kvm-ppc-next
X-Git-Tag: v4.15-rc1~79^2~1^2~1
X-Git-Url: https://asedeno.scripts.mit.edu/gitweb/?a=commitdiff_plain;h=072df8130c6b602c8ee219f7b06394680cafad2f;p=linux.git

Merge branch 'kvm-ppc-fixes' into kvm-ppc-next

This merges in a couple of fixes from the kvm-ppc-fixes branch that
modify the same areas of code as some commits from the kvm-ppc-next
branch, in order to resolve the conflicts.

Signed-off-by: Paul Mackerras
---

072df8130c6b602c8ee219f7b06394680cafad2f
diff --cc arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6aec8a22aeff,59247af5fd45..235319c2574e
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@@ -651,6 -646,16 +651,16 @@@ int kvmppc_book3s_hv_page_fault(struct
  		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
  		hnow_r = hpte_new_to_old_r(hnow_r);
  	}
+ 
+ 	/*
+ 	 * If the HPT is being resized, don't update the HPTE,
+ 	 * instead let the guest retry after the resize operation is complete.
 -	 * The synchronization for hpte_setup_done test vs. set is provided
++	 * The synchronization for mmu_ready test vs. set is provided
+ 	 * by the HPTE lock.
+ 	 */
 -	if (!kvm->arch.hpte_setup_done)
++	if (!kvm->arch.mmu_ready)
+ 		goto out_unlock;
+ 
  	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
  	    rev->guest_rpte != hpte[2])
  		/* HPTE has been changed under us; let the guest retry */
diff --cc arch/powerpc/kvm/book3s_hv.c
index fff62fdf1464,8d43cf205d34..ca0d4d938d6a
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@@ -2717,11 -2705,14 +2717,13 @@@ static noinline void kvmppc_run_core(st
  	 * Hard-disable interrupts, and check resched flag and signals.
  	 * If we need to reschedule or deliver a signal, clean up
  	 * and return without going into the guest(s).
 -	 * If the hpte_setup_done flag has been cleared, don't go into the
++	 * If the mmu_ready flag has been cleared, don't go into the
+ 	 * guest because that means a HPT resize operation is in progress.
  	 */
  	local_irq_disable();
  	hard_irq_disable();
  	if (lazy_irq_pending() || need_resched() ||
- 	    recheck_signals(&core_info)) {
 -	    recheck_signals(&core_info) ||
 -	    (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
++	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
  		local_irq_enable();
  		vc->vcore_state = VCORE_INACTIVE;
  		/* Unlock all except the primary vcore */
@@@ -3174,6 -3135,20 +3176,30 @@@ static int kvmppc_run_vcpu(struct kvm_r
  
  	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
  	       !signal_pending(current)) {
 -		/* See if the HPT and VRMA are ready to go */
 -		if (!kvm_is_radix(vcpu->kvm) &&
 -		    !vcpu->kvm->arch.hpte_setup_done) {
++		/* See if the MMU is ready to go */
++		if (!vcpu->kvm->arch.mmu_ready) {
+ 			spin_unlock(&vc->lock);
 -			r = kvmppc_hv_setup_htab_rma(vcpu);
++			mutex_lock(&vcpu->kvm->lock);
++			r = 0;
++			if (!vcpu->kvm->arch.mmu_ready) {
++				if (!kvm_is_radix(vcpu->kvm))
++					r = kvmppc_hv_setup_htab_rma(vcpu);
++				if (!r) {
++					if (cpu_has_feature(CPU_FTR_ARCH_300))
++						kvmppc_setup_partition_table(vcpu->kvm);
++					vcpu->kvm->arch.mmu_ready = 1;
++				}
++			}
++			mutex_unlock(&vcpu->kvm->lock);
+ 			spin_lock(&vc->lock);
+ 			if (r) {
+ 				kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ 				kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+ 				vcpu->arch.ret = r;
+ 				break;
+ 			}
+ 		}
+ 
  		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
  			kvmppc_vcore_end_preempt(vc);
  
@@@ -3288,29 -3262,10 +3314,11 @@@ static int kvmppc_vcpu_run_hv(struct kv
  		return -EINTR;
  	}
  
 -	atomic_inc(&vcpu->kvm->arch.vcpus_running);
 -	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
 +	kvm = vcpu->kvm;
 +	atomic_inc(&kvm->arch.vcpus_running);
 +	/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
  	smp_mb();
  
- 	/* On the first time here, set up MMU if necessary */
- 	if (!vcpu->kvm->arch.mmu_ready) {
- 		mutex_lock(&kvm->lock);
- 		r = 0;
- 		if (!kvm->arch.mmu_ready) {
- 			if (!kvm_is_radix(vcpu->kvm))
- 				r = kvmppc_hv_setup_htab_rma(vcpu);
- 			if (!r) {
- 				if (cpu_has_feature(CPU_FTR_ARCH_300))
- 					kvmppc_setup_partition_table(kvm);
- 				kvm->arch.mmu_ready = 1;
- 			}
- 		}
- 		mutex_unlock(&kvm->lock);
- 		if (r)
- 			goto out;
- 	}
- 
  	flush_all_to_thread(current);
  
  	/* Save userspace EBB and other register values */
diff --cc arch/powerpc/kvm/powerpc.c
index a0b7f094de78,ee279c7f4802..6b6c53c42ac9
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@@ -643,8 -644,7 +643,8 @@@ int kvm_vm_ioctl_check_extension(struc
  		break;
  #endif
  	case KVM_CAP_PPC_HTM:
- 		r = is_kvmppc_hv_enabled(kvm) &&
 -		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
++		r = hv_enabled &&
 +		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
  		break;
  	default:
  		r = 0;