KVM: MIPS: Remove duplicated ASIDs from vcpu
author James Hogan <james.hogan@imgtec.com>
Tue, 11 Oct 2016 22:14:39 +0000 (23:14 +0100)
committer James Hogan <james.hogan@imgtec.com>
Fri, 3 Feb 2017 15:20:45 +0000 (15:20 +0000)
The kvm_vcpu_arch structure contains mm_structs for allocating MMU
contexts (primarily the ASID), but it also copies the resulting ASIDs
into guest_{user,kernel}_asid[] arrays, which are referenced from
uasm-generated code.
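
For context, that uasm-generated guest entry code does the equivalent
of the following C to set up the guest's EntryHi (an illustrative
sketch only, with kernel_mode standing in for the KSU/ERL/EXL test in
entry.c):

    int cpu = smp_processor_id();
    unsigned long asid = kernel_mode ?
            vcpu->arch.guest_kernel_asid[cpu] :  /* the duplicated arrays */
            vcpu->arch.guest_user_asid[cpu];
    asid &= cpu_asid_mask(&cpu_data[cpu]);       /* strip version bits */
    write_c0_entryhi(asid);

After this patch the same lookup reads straight out of the relevant
mm_struct's context.asid[] array instead.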

This duplication doesn't seem to serve any purpose, and it gets in the
way of generalising the ASID handling across guest kernel/user modes,
so let's just extract the ASID straight out of the mm_struct on demand;
in fact there are convenient cpu_context() and cpu_asid() macros for
doing so.
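
For reference, cpu_context() and cpu_asid() are defined in
arch/mips/include/asm/mmu_context.h along these lines (a sketch of that
header for illustration, not something this patch touches):

    /* per-CPU ASID slot kept in the mm_struct */
    #define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
    /* ASID with the version bits masked off */
    #define cpu_asid(cpu, mm) \
            (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

so, for example, cpu_asid(cpu, &vcpu->arch.guest_kernel_mm) yields the
value that guest_kernel_asid[cpu] used to cache.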

To reduce the verbosity of this code, we also add kern_mm and user_mm
local variables where the kernel and user mm_structs are used.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
arch/mips/include/asm/kvm_host.h
arch/mips/kvm/emulate.c
arch/mips/kvm/entry.c
arch/mips/kvm/mips.c
arch/mips/kvm/mmu.c
arch/mips/kvm/tlb.c
arch/mips/kvm/trap_emul.c

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 1c70b5224151c742122e05c5eec7a0a043c23b6a..923f81dc61158e31c56f811771c87ee04d70f6e4 100644
@@ -321,9 +321,7 @@ struct kvm_vcpu_arch {
        /* S/W Based TLB for guest */
        struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
 
-       /* Cached guest kernel/user ASIDs */
-       u32 guest_user_asid[NR_CPUS];
-       u32 guest_kernel_asid[NR_CPUS];
+       /* Guest kernel/user [partial] mm */
        struct mm_struct guest_kernel_mm, guest_user_mm;
 
        /* Guest ASID of last user mode execution */
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index aa0937423e287b06e007b2251977ff10e23c63b2..060acc5b3378ef74e151296e3e5eb8222b78833c 100644
@@ -856,6 +856,8 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb)
 {
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+       struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu, i;
        bool user;
 
@@ -879,8 +881,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                if (i == cpu)
                        continue;
                if (user)
-                       vcpu->arch.guest_user_asid[i] = 0;
-               vcpu->arch.guest_kernel_asid[i] = 0;
+                       cpu_context(i, user_mm) = 0;
+               cpu_context(i, kern_mm) = 0;
        }
 
        preempt_enable();
@@ -1056,6 +1058,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           struct kvm_vcpu *vcpu)
 {
        struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
@@ -1178,13 +1181,11 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                         */
                                        preempt_disable();
                                        cpu = smp_processor_id();
-                                       kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+                                       kvm_get_new_mmu_context(kern_mm,
                                                                cpu, vcpu);
-                                       vcpu->arch.guest_kernel_asid[cpu] =
-                                               vcpu->arch.guest_kernel_mm.context.asid[cpu];
                                        for_each_possible_cpu(i)
                                                if (i != cpu)
-                                                       vcpu->arch.guest_kernel_asid[i] = 0;
+                                                       cpu_context(i, kern_mm) = 0;
                                        preempt_enable();
                                }
                                kvm_write_c0_guest_entryhi(cop0,
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index e92fb190e2d628e878b209a14e8c8be551510654..f81888704caa0546cc4d158b6ef8d769fe22c052 100644
@@ -12,6 +12,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/log2.h>
 #include <asm/msa.h>
 #include <asm/setup.h>
 #include <asm/uasm.h>
@@ -286,23 +287,26 @@ static void *kvm_mips_build_enter_guest(void *addr)
        uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
        uasm_i_xori(&p, T0, T0, KSU_USER);
        uasm_il_bnez(&p, &r, T0, label_kernel_asid);
-        UASM_i_ADDIU(&p, T1, K1,
-                     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
+        UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+                                          guest_kernel_mm.context.asid));
        /* else user */
-       UASM_i_ADDIU(&p, T1, K1,
-                    offsetof(struct kvm_vcpu_arch, guest_user_asid));
+       UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
+                                         guest_user_mm.context.asid));
        uasm_l_kernel_asid(&l, p);
 
        /* t1: contains the base of the ASID array, need to get the cpu id  */
        /* smp_processor_id */
        uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
-       /* x4 */
-       uasm_i_sll(&p, T2, T2, 2);
+       /* index the ASID array */
+       uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
        UASM_i_ADDU(&p, T3, T1, T2);
-       uasm_i_lw(&p, K0, 0, T3);
+       UASM_i_LW(&p, K0, 0, T3);
 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
-       /* x sizeof(struct cpuinfo_mips)/4 */
-       uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
+       /*
+        * reuse ASID array offset
+        * cpuinfo_mips is a multiple of sizeof(long)
+        */
+       uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
        uasm_i_mul(&p, T2, T2, T3);
 
        UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f9e305f7ad7165557a5293c218241fc5b080f332..85bc54f35695ec1a3cf99e018ec252a35498275d 100644
@@ -413,6 +413,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 /* Must be called with preemption disabled, just before entering guest */
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
+       struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;
@@ -426,13 +427,10 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
        if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
-                       kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
-                                               vcpu);
-                       vcpu->arch.guest_user_asid[cpu] =
-                               vcpu->arch.guest_user_mm.context.asid[cpu];
+                       kvm_get_new_mmu_context(user_mm, cpu, vcpu);
                        for_each_possible_cpu(i)
                                if (i != cpu)
-                                       vcpu->arch.guest_user_asid[cpu] = 0;
+                                       cpu_context(i, user_mm) = 0;
                        vcpu->arch.last_user_gasid = gasid;
                }
        }
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index df013538113f677ba670c35fbefd381b59f5a3e4..27d6d0dbfeb43f42f6290dadd7a7461ba4b14b24 100644
 
 static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();
 
-       return vcpu->arch.guest_kernel_asid[cpu] &
-                       cpu_asid_mask(&cpu_data[cpu]);
+       return cpu_asid(cpu, kern_mm);
 }
 
 static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
+       struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();
 
-       return vcpu->arch.guest_user_asid[cpu] &
-                       cpu_asid_mask(&cpu_data[cpu]);
+       return cpu_asid(cpu, user_mm);
 }
 
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 254377d8e0b9921ff56dab3c2ef17d0996d95c57..ba490130b5e7aa9e44c3b486bb624b982fffaae1 100644
@@ -38,18 +38,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_instance);
 
 static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();
 
-       return vcpu->arch.guest_kernel_asid[cpu] &
-                       cpu_asid_mask(&cpu_data[cpu]);
+       return cpu_asid(cpu, kern_mm);
 }
 
 static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
+       struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();
 
-       return vcpu->arch.guest_user_asid[cpu] &
-                       cpu_asid_mask(&cpu_data[cpu]);
+       return cpu_asid(cpu, user_mm);
 }
 
 inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 494a90221b5e4b190965f675f4b3d7fa94beede7..c7854d32fd64e02c0225b682d6d84ca9055e4879 100644
@@ -635,32 +635,29 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
 
 static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+       struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
+       struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
 
        /* Allocate new kernel and user ASIDs if needed */
 
-       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+       if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
-               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-               vcpu->arch.guest_kernel_asid[cpu] =
-                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
 
                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
-               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
+                         cpu, cpu_context(cpu, kern_mm));
        }
 
-       if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+       if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
-               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-               vcpu->arch.guest_user_asid[cpu] =
-                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(user_mm, cpu, vcpu);
 
                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
-               kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-                         vcpu->arch.guest_user_asid[cpu]);
+               kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
+                         cpu, cpu_context(cpu, user_mm));
        }
 
        /*
@@ -670,11 +667,9 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         */
        if (current->flags & PF_VCPU) {
                if (KVM_GUEST_KERNEL_MODE(vcpu))
-                       write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
-                                        asid_mask);
+                       write_c0_entryhi(cpu_asid(cpu, kern_mm));
                else
-                       write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
-                                        asid_mask);
+                       write_c0_entryhi(cpu_asid(cpu, user_mm));
                ehb();
        }