/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
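
/*
 * GVA -> GPA translation for the unmapped segments: KSeg0 and KSeg1 map
 * linearly onto physical memory, so the guest physical address can be
 * computed directly. Addresses in any other segment cannot be translated
 * here.
 */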
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}
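
/*
 * A coprocessor unusable exception with CE=1 means the guest touched the FPU:
 * either hand the guest the real FPU by restoring its FPU state, or deliver a
 * guest COP1 unusable exception. For any other coprocessor, the trapping
 * instruction (typically a privileged CP0 access by the guest kernel) is
 * emulated instead.
 */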
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
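
/*
 * A TLB modified exception means the guest wrote through an entry which the
 * host TLB maps read-only. Guest user and KSeg2/3 addresses are passed on to
 * the TLB modification handler; KSeg0 write faults are not expected and are
 * reported to userspace as internal errors.
 */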
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel.
                 */
                kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
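
/*
 * Common handler for TLB load/store misses. It distinguishes the commpage,
 * guest user addresses (delivered to the guest kernel or refilled into the
 * shadow host TLB from the guest TLB), KSeg0 (always handled by KVM), and
 * MMIO accesses that surface as TLB exceptions under EVA.
 */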
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(vcpu->arch.host_cp0_badvaddr,
                                                    vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                kvm_debug("Emulate %s MMIO space\n",
                          store ? "Store to" : "Load from");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate %s MMIO space failed\n",
                                store ? "Store to" : "Load from");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
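
/*
 * Address error exceptions on KSeg0/KSeg1 are how unmapped MMIO accesses
 * surface here: the faulting instruction is emulated and the access is
 * completed in userspace via KVM_EXIT_MMIO. Other address errors are
 * reported to userspace as internal errors.
 */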
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                kvm_debug("Emulate Store to MMIO space\n");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
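
/*
 * The handlers below (syscall, reserved instruction, breakpoint, trap, MSA
 * floating point exception and floating point exception) share one pattern:
 * deliver the corresponding exception to the guest and resume it, or exit to
 * userspace with KVM_EXIT_INTERNAL_ERROR if emulation fails.
 */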
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        vcpu->arch.kscratch_enabled = 0xfc;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}
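
/*
 * Tear down a GVA page table allocated above: free only the leaf page table
 * pages below the start of the kernel segments, so that host kernel entries
 * copied from init_mm.pgd are left untouched, then free the pgd itself.
 */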
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;

                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                pud = pud_offset(pgd + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;

                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;

                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                }
        }
        pgd_free(NULL, pgd);
}
static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        return 0;
}
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        return 0;
}
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
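
/*
 * Called when the VCPU is scheduled onto a CPU: make sure the guest kernel
 * and guest user mm_structs have ASIDs of the current ASID version on this
 * CPU, and if we were preempted while in guest context, re-activate the GVA
 * address space for the current guest mode.
 */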
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /* Allocate new kernel and user ASIDs if needed */

        if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(kern_mm, cpu, vcpu);

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
                          cpu, cpu_context(cpu, kern_mm));
        }

        if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(user_mm, cpu, vcpu);

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
                          cpu, cpu_context(cpu, user_mm));
        }

        /*
         * Were we in guest context? If so then the pre-empted ASID is
         * no longer valid, we need to set it to what it should be based
         * on the mode of the Guest (Kernel/User)
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                write_c0_entryhi(cpu_asid(cpu, mm));
                TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}
static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
                     asid_version_mask(cpu))) {
                        kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                                  cpu_context(cpu, current->mm));
                        get_new_mmu_context(current->mm, cpu);
                }
                write_c0_entryhi(cpu_asid(cpu, current->mm));
                TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * Lazy host ASID regeneration for guest user mode.
         * If the guest ASID has changed since the last guest usermode
         * execution, regenerate the host ASID so as to invalidate stale TLB
         * entries.
         */
        if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_get_new_mmu_context(user_mm, cpu, vcpu);
                        for_each_possible_cpu(i)
                                if (i != cpu)
                                        cpu_context(i, user_mm) = 0;
                        vcpu->arch.last_user_gasid = gasid;
                }
        }
}
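
/*
 * The main run loop wrapper: deliver pending interrupts, then run the guest
 * in its own GVA address space, and restore the host process address space
 * (ASID and page tables) once the guest exits.
 */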
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu)))
                get_new_mmu_context(current->mm, cpu);
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
        kvm_mips_resume_mm(cpu);

        htw_start();

        return r;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}