2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/kvm_host.h>
15 #include <linux/uaccess.h>
16 #include <linux/vmalloc.h>
17 #include <asm/mmu_context.h>
18 #include <asm/pgalloc.h>
20 #include "interrupt.h"
22 static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
25 gva_t kseg = KSEGX(gva);
27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
30 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR;
35 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
/*
 * Handle a Coprocessor Unusable exception taken from the guest.
 * CE == 1 (COP1/FPU): if the guest has no FPU or its CU1 bit is clear,
 * deliver a guest COP1 Unusable exception; otherwise the real FPU state
 * is restored for the guest (restore path elided from this view).
 * Other CE values fall back to emulating the offending instruction.
 */
40 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
42 struct mips_coproc *cop0 = vcpu->arch.cop0;
43 struct kvm_run *run = vcpu->run;
44 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
45 u32 cause = vcpu->arch.host_cp0_cause;
46 enum emulation_result er = EMULATE_DONE;
47 int ret = RESUME_GUEST;
/* Extract the coprocessor number from the Cause.CE field */
49 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
51 if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
52 (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
54 * Unusable/no FPU in guest:
55 * deliver guest COP1 Unusable Exception
57 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
59 /* Restore FPU state */
/* Not COP1: emulate the instruction that raised the exception */
64 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
/* NOTE(review): failure branch — emulation failed, bail to userland */
73 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
/* NOTE(review): interrupted-emulation branch — exit to userland with INTR */
78 run->exit_reason = KVM_EXIT_INTR;
/*
 * Handle a TLB Modified exception (write to a clean/read-only mapping).
 * Guest-user / KSEG23 addresses are passed to kvm_mips_handle_tlbmod();
 * guest KSEG0 and all other addresses are unexpected and reported to
 * userland as internal errors.
 */
88 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
90 struct kvm_run *run = vcpu->run;
91 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
92 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
93 u32 cause = vcpu->arch.host_cp0_cause;
94 enum emulation_result er = EMULATE_DONE;
95 int ret = RESUME_GUEST;
/* Guest user address or KSEG23: let the guest kernel deal with it */
97 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
98 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
99 kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
100 cause, opc, badvaddr);
101 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
103 if (er == EMULATE_DONE)
/* EMULATE_FAIL (presumably): report internal error to userland */
106 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
109 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
111 * XXXKYMA: The guest kernel does not expect to get this fault
112 * when we are not using HIGHMEM. Need to address this in a
115 kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
116 cause, opc, badvaddr);
117 kvm_mips_dump_host_tlbs();
118 kvm_arch_vcpu_dump_regs(vcpu);
119 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
/* Anything else is an illegal fault address */
122 kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
123 cause, opc, badvaddr);
124 kvm_mips_dump_host_tlbs();
125 kvm_arch_vcpu_dump_regs(vcpu);
126 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
/*
 * Common handler for guest TLB load/store misses (@store selects the
 * direction for logging/MMIO emulation).  Cases, in order:
 *  1. Kernel-mode access to the KVM commpage: map the commpage.
 *  2. Guest-user / KSEG23 address: resolve via guest TLB or deliver the
 *     fault to the guest kernel.
 *  3. Guest KSEG0: always handled by KVM directly.
 *  4. Kernel-mode access to host CKSEG0/CKSEG1: emulate as MMIO
 *     (can happen instead of an address error under EVA).
 *  5. Anything else: illegal, internal error to userland.
 */
132 static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
134 struct kvm_run *run = vcpu->run;
135 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
136 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
137 u32 cause = vcpu->arch.host_cp0_cause;
138 enum emulation_result er = EMULATE_DONE;
139 int ret = RESUME_GUEST;
141 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
142 && KVM_GUEST_KERNEL_MODE(vcpu)) {
143 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
144 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
147 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
148 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
149 kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
150 store ? "ST" : "LD", cause, opc, badvaddr);
153 * User Address (UA) fault, this could happen if
154 * (1) TLB entry not present/valid in both Guest and shadow host
155 * TLBs, in this case we pass on the fault to the guest
156 * kernel and let it handle it.
157 * (2) TLB entry is present in the Guest TLB but not in the
158 * shadow, in this case we inject the TLB from the Guest TLB
159 * into the shadow host TLB
162 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
163 if (er == EMULATE_DONE)
/* EMULATE_FAIL (presumably): report internal error to userland */
166 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
171 * All KSEG0 faults are handled by KVM, as the guest kernel does
172 * not expect to ever get them
174 if (kvm_mips_handle_kseg0_tlb_fault
175 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
176 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
179 } else if (KVM_GUEST_KERNEL_MODE(vcpu)
180 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
181 /* A code fetch fault doesn't count as an MMIO */
182 if (!store && kvm_is_ifetch_fault(&vcpu->arch)) {
183 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
188 * With EVA we may get a TLB exception instead of an address
189 * error when the guest performs MMIO to KSeg1 addresses.
191 kvm_debug("Emulate %s MMIO space\n",
192 store ? "Store to" : "Load from");
193 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
194 if (er == EMULATE_FAIL) {
195 kvm_err("Emulate %s MMIO space failed\n",
196 store ? "Store to" : "Load from");
197 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
/* Successful MMIO emulation: hand the access to userland */
200 run->exit_reason = KVM_EXIT_MMIO;
204 kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
205 store ? "ST" : "LD", cause, opc, badvaddr);
206 kvm_mips_dump_host_tlbs();
207 kvm_arch_vcpu_dump_regs(vcpu);
208 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214 static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
216 return kvm_trap_emul_handle_tlb_miss(vcpu, true);
219 static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
221 return kvm_trap_emul_handle_tlb_miss(vcpu, false);
224 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
226 struct kvm_run *run = vcpu->run;
227 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
228 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
229 u32 cause = vcpu->arch.host_cp0_cause;
230 enum emulation_result er = EMULATE_DONE;
231 int ret = RESUME_GUEST;
233 if (KVM_GUEST_KERNEL_MODE(vcpu)
234 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
235 kvm_debug("Emulate Store to MMIO space\n");
236 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
237 if (er == EMULATE_FAIL) {
238 kvm_err("Emulate Store to MMIO space failed\n");
239 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
242 run->exit_reason = KVM_EXIT_MMIO;
246 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
247 cause, opc, badvaddr);
248 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
254 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
256 struct kvm_run *run = vcpu->run;
257 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
258 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
259 u32 cause = vcpu->arch.host_cp0_cause;
260 enum emulation_result er = EMULATE_DONE;
261 int ret = RESUME_GUEST;
263 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
264 /* A code fetch fault doesn't count as an MMIO */
265 if (kvm_is_ifetch_fault(&vcpu->arch)) {
266 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
270 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
271 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
272 if (er == EMULATE_FAIL) {
273 kvm_err("Emulate Load from MMIO space failed\n");
274 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
277 run->exit_reason = KVM_EXIT_MMIO;
281 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
282 cause, opc, badvaddr);
283 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
290 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
292 struct kvm_run *run = vcpu->run;
293 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
294 u32 cause = vcpu->arch.host_cp0_cause;
295 enum emulation_result er = EMULATE_DONE;
296 int ret = RESUME_GUEST;
298 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
299 if (er == EMULATE_DONE)
302 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
308 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
310 struct kvm_run *run = vcpu->run;
311 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
312 u32 cause = vcpu->arch.host_cp0_cause;
313 enum emulation_result er = EMULATE_DONE;
314 int ret = RESUME_GUEST;
316 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
317 if (er == EMULATE_DONE)
320 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
326 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
328 struct kvm_run *run = vcpu->run;
329 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
330 u32 cause = vcpu->arch.host_cp0_cause;
331 enum emulation_result er = EMULATE_DONE;
332 int ret = RESUME_GUEST;
334 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
335 if (er == EMULATE_DONE)
338 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
344 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
346 struct kvm_run *run = vcpu->run;
347 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
348 u32 cause = vcpu->arch.host_cp0_cause;
349 enum emulation_result er = EMULATE_DONE;
350 int ret = RESUME_GUEST;
352 er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
353 if (er == EMULATE_DONE) {
356 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
362 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
364 struct kvm_run *run = vcpu->run;
365 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
366 u32 cause = vcpu->arch.host_cp0_cause;
367 enum emulation_result er = EMULATE_DONE;
368 int ret = RESUME_GUEST;
370 er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
371 if (er == EMULATE_DONE) {
374 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
380 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
382 struct kvm_run *run = vcpu->run;
383 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
384 u32 cause = vcpu->arch.host_cp0_cause;
385 enum emulation_result er = EMULATE_DONE;
386 int ret = RESUME_GUEST;
388 er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
389 if (er == EMULATE_DONE) {
392 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
399 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
400 * @vcpu: Virtual CPU context.
402 * Handle when the guest attempts to use MSA when it is disabled.
404 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
406 struct mips_coproc *cop0 = vcpu->arch.cop0;
407 struct kvm_run *run = vcpu->run;
408 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
409 u32 cause = vcpu->arch.host_cp0_cause;
410 enum emulation_result er = EMULATE_DONE;
411 int ret = RESUME_GUEST;
413 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
414 (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
416 * No MSA in guest, or FPU enabled and not in FR=1 mode,
417 * guest reserved instruction exception
419 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
420 } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
421 /* MSA disabled by guest, guest MSA disabled exception */
422 er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
/* MSA usable by guest but disabled in root context (restore path elided) */
424 /* Restore MSA/FPU state */
/* NOTE(review): failure branch — emulation failed, bail to userland */
435 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
445 static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
447 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
448 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
450 vcpu->arch.kscratch_enabled = 0xfc;
453 * Allocate GVA -> HPA page tables.
454 * MIPS doesn't use the mm_struct pointer argument.
456 kern_mm->pgd = pgd_alloc(kern_mm);
460 user_mm->pgd = pgd_alloc(user_mm);
462 pgd_free(kern_mm, kern_mm->pgd);
/*
 * Free a GVA page table allocated by kvm_trap_emul_vcpu_init().
 * Walks the pgd/pud/pmd levels and frees the pte pages, but only for the
 * user half of the address space (below 0x80000000) — entries above that
 * are host kernel page tables copied from init_mm.pgd and must not be
 * freed (the `end` bound presumably guards the elided loop-exit checks).
 */
469 static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
471 /* Don't free host kernel page tables copied from init_mm.pgd */
472 const unsigned long end = 0x80000000;
473 unsigned long pgd_va, pud_va, pmd_va;
/* Walk only the user portion of the pgd */
479 for (i = 0; i < USER_PTRS_PER_PGD; i++) {
480 if (pgd_none(pgd[i]))
/* Virtual address covered by this pgd entry */
483 pgd_va = (unsigned long)i << PGDIR_SHIFT;
486 pud = pud_offset(pgd + i, 0);
487 for (j = 0; j < PTRS_PER_PUD; j++) {
488 if (pud_none(pud[j]))
491 pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
494 pmd = pmd_offset(pud + j, 0);
495 for (k = 0; k < PTRS_PER_PMD; k++) {
496 if (pmd_none(pmd[k]))
499 pmd_va = pud_va | (k << PMD_SHIFT);
/* Leaf level: free the pte page itself */
502 pte = pte_offset(pmd + k, 0);
503 pte_free_kernel(NULL, pte);
512 static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
514 kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
515 kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
/*
 * One-time guest CP0 register setup so the guest boots as expected:
 * PRId, Config0-5/7, IntCtl and EBase are populated here, partly from
 * the corresponding host registers.
 */
518 static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
520 struct mips_coproc *cop0 = vcpu->arch.cop0;
522 int vcpu_id = vcpu->vcpu_id;
525 * Arch specific stuff, set up config registers properly so that the
526 * guest will come up as expected
528 #ifndef CONFIG_CPU_MIPSR6
529 /* r2-r5, simulate a MIPS 24kc */
530 kvm_write_c0_guest_prid(cop0, 0x00019300);
/* NOTE(review): #else branch — R6 PRId (the directive line is elided here) */
532 /* r6+, simulate a generic QEMU machine */
533 kvm_write_c0_guest_prid(cop0, 0x00010000);
536 * Have config1, Cacheable, noncoherent, write-back, write allocate.
537 * Endianness, arch revision & virtually tagged icache should match
/* Inherit the architecture revision from the host Config register */
540 config = read_c0_config() & MIPS_CONF_AR;
541 config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
542 #ifdef CONFIG_CPU_BIG_ENDIAN
545 if (cpu_has_vtag_icache)
546 config |= MIPS_CONF_VI;
547 kvm_write_c0_guest_config(cop0, config);
549 /* Read the cache characteristics from the host Config1 Register */
550 config1 = (read_c0_config1() & ~0x7f);
/* Advertise KVM's software guest TLB size in Config1.MMUSize */
552 /* Set up MMU size */
553 config1 &= ~(0x3f << 25);
554 config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
556 /* We unset some bits that we aren't emulating */
557 config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
558 MIPS_CONF1_WR | MIPS_CONF1_CA);
559 kvm_write_c0_guest_config1(cop0, config1);
561 /* Have config3, no tertiary/secondary caches implemented */
562 kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
563 /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
565 /* Have config4, UserLocal */
566 kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
/* Have config5 */
569 kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
/* No config6 */
572 kvm_write_c0_guest_config5(cop0, 0);
574 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
575 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
578 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
580 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
582 /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
583 kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
584 (vcpu_id & MIPS_EBASE_CPUNUM));
/*
 * Number of extra backend-specific registers for KVM_GET_REG_LIST
 * (body elided from this view — presumably returns a constant count).
 */
589 static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
/*
 * Copy backend-specific register indices out for KVM_GET_REG_LIST
 * (remaining parameters and body elided from this view).
 */
594 static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
/*
 * KVM_GET_ONE_REG backend hook: read timer-related registers that need
 * backend-specific handling (CP0_Count and the COUNT_CTL/RESUME/HZ
 * pseudo-registers).  Switch breaks/default elided from this view.
 */
600 static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
601 const struct kvm_one_reg *reg,
605 case KVM_REG_MIPS_CP0_COUNT:
/* Derive the current Count value from the timer emulation */
606 *v = kvm_mips_read_count(vcpu);
608 case KVM_REG_MIPS_COUNT_CTL:
609 *v = vcpu->arch.count_ctl;
611 case KVM_REG_MIPS_COUNT_RESUME:
612 *v = ktime_to_ns(vcpu->arch.count_resume);
614 case KVM_REG_MIPS_COUNT_HZ:
615 *v = vcpu->arch.count_hz;
/*
 * KVM_SET_ONE_REG backend hook: write registers that need special
 * handling — the emulated timer (Count/Compare/Cause.DC and the
 * COUNT_* pseudo-registers) and the Config registers, which apply a
 * per-register writability mask.  Switch breaks/default elided from
 * this view.
 */
623 static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
624 const struct kvm_one_reg *reg,
627 struct mips_coproc *cop0 = vcpu->arch.cop0;
629 unsigned int cur, change;
632 case KVM_REG_MIPS_CP0_COUNT:
633 kvm_mips_write_count(vcpu, v);
635 case KVM_REG_MIPS_CP0_COMPARE:
/* false: don't acknowledge any pending timer interrupt */
636 kvm_mips_write_compare(vcpu, v, false);
638 case KVM_REG_MIPS_CP0_CAUSE:
640 * If the timer is stopped or started (DC bit) it must look
641 * atomic with changes to the interrupt pending bits (TI, IRQ5).
642 * A timer interrupt should not happen in between.
644 if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
/* DC is changing: order the timer toggle around the Cause update */
646 /* disable timer first */
647 kvm_mips_count_disable_cause(vcpu);
648 kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
650 /* enable timer last */
651 kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
652 kvm_mips_count_enable_cause(vcpu);
/* DC unchanged: plain write */
655 kvm_write_c0_guest_cause(cop0, v);
658 case KVM_REG_MIPS_CP0_CONFIG:
659 /* read-only for now */
661 case KVM_REG_MIPS_CP0_CONFIG1:
/* Only apply bits the writability mask allows to change */
662 cur = kvm_read_c0_guest_config1(cop0);
663 change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
666 kvm_write_c0_guest_config1(cop0, v);
669 case KVM_REG_MIPS_CP0_CONFIG2:
670 /* read-only for now */
672 case KVM_REG_MIPS_CP0_CONFIG3:
673 cur = kvm_read_c0_guest_config3(cop0);
674 change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
677 kvm_write_c0_guest_config3(cop0, v);
680 case KVM_REG_MIPS_CP0_CONFIG4:
681 cur = kvm_read_c0_guest_config4(cop0);
682 change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
685 kvm_write_c0_guest_config4(cop0, v);
688 case KVM_REG_MIPS_CP0_CONFIG5:
689 cur = kvm_read_c0_guest_config5(cop0);
690 change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
693 kvm_write_c0_guest_config5(cop0, v);
696 case KVM_REG_MIPS_COUNT_CTL:
697 ret = kvm_mips_set_count_ctl(vcpu, v);
699 case KVM_REG_MIPS_COUNT_RESUME:
700 ret = kvm_mips_set_count_resume(vcpu, v);
702 case KVM_REG_MIPS_COUNT_HZ:
703 ret = kvm_mips_set_count_hz(vcpu, v);
/*
 * vcpu_load hook: called when this VCPU is scheduled onto @cpu.
 * Regenerates stale guest kernel/user ASIDs for this CPU, and when
 * re-entering from guest context (PF_VCPU) switches EntryHi and the
 * TLB-miss handler pgd to the mode-appropriate guest mm.
 */
711 static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
713 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
714 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
715 struct mm_struct *mm;
717 /* Allocate new kernel and user ASIDs if needed */
/* ASID version mismatch => this CPU's ASID for the guest kernel mm is stale */
719 if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
720 asid_version_mask(cpu)) {
721 get_new_mmu_context(kern_mm, cpu);
723 kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
724 cpu_context(cpu, current->mm));
725 kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#lx\n",
726 cpu, cpu_context(cpu, kern_mm));
/* Same check for the guest user mm */
729 if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
730 asid_version_mask(cpu)) {
731 get_new_mmu_context(user_mm, cpu);
733 kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
734 cpu_context(cpu, current->mm));
735 kvm_debug("[%d]: Allocated new ASID for Guest User: %#lx\n",
736 cpu, cpu_context(cpu, user_mm));
740 * Were we in guest context? If so then the pre-empted ASID is
741 * no longer valid, we need to set it to what it should be based
742 * on the mode of the Guest (Kernel/User)
744 if (current->flags & PF_VCPU) {
745 mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
746 write_c0_entryhi(cpu_asid(cpu, mm));
747 TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
748 kvm_mips_suspend_mm(cpu);
/*
 * vcpu_put hook: called when this VCPU is descheduled from @cpu.
 * If we were preempted from guest context (PF_VCPU), switch back to the
 * normal Linux process address space, regenerating its ASID first when
 * it has gone stale on this CPU.
 */
755 static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
759 if (current->flags & PF_VCPU) {
760 /* Restore normal Linux process memory map */
761 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
762 asid_version_mask(cpu))) {
763 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
764 cpu_context(cpu, current->mm));
765 get_new_mmu_context(current->mm, cpu);
/* Re-point EntryHi and the TLB-miss pgd at the host process mm */
767 write_c0_entryhi(cpu_asid(cpu, current->mm));
768 TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
769 kvm_mips_resume_mm(cpu);
/*
 * Process pending vcpu->requests before (re-)entering the guest.
 * Currently handles KVM_REQ_TLB_FLUSH: flush both guest GVA page tables
 * and invalidate the cached ASIDs on every CPU.  The third parameter
 * (elided in this view) presumably selects whether the new ASID/EntryHi
 * is reloaded immediately — confirm against callers.
 */
776 static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
779 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
780 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
781 struct mm_struct *mm;
/* Fast path: nothing requested */
784 if (likely(!vcpu->requests))
787 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
789 * Both kernel & user GVA mappings must be invalidated. The
790 * caller is just about to check whether the ASID is stale
791 * anyway so no need to reload it here.
793 kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
794 kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
/* Invalidate stale ASIDs for both guest mms on all CPUs */
795 for_each_possible_cpu(i) {
796 cpu_context(i, kern_mm) = 0;
797 cpu_context(i, user_mm) = 0;
800 /* Generate new ASID for current mode */
802 mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
803 get_new_mmu_context(mm, cpu);
805 write_c0_entryhi(cpu_asid(cpu, mm));
806 TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
/*
 * Called with IRQs disabled just before re-entering the guest.
 * Processes pending requests, lazily invalidates the guest-user GVA page
 * table / ASIDs when the guest ASID (EntryHi.ASID) has changed since the
 * last user-mode execution, and regenerates the host ASID for the
 * current guest mode's mm if it has gone stale.
 */
812 static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
813 struct kvm_vcpu *vcpu)
815 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
816 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
817 struct mm_struct *mm;
818 struct mips_coproc *cop0 = vcpu->arch.cop0;
819 int i, cpu = smp_processor_id();
823 * No need to reload ASID, IRQs are disabled already so there's no rush,
824 * and we'll check if we need to regenerate below anyway before
825 * re-entering the guest.
827 kvm_trap_emul_check_requests(vcpu, cpu, false);
829 if (KVM_GUEST_KERNEL_MODE(vcpu)) {
835 * Lazy host ASID regeneration / PT flush for guest user mode.
836 * If the guest ASID has changed since the last guest usermode
837 * execution, invalidate the stale TLB entries and flush GVA PT
840 gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
841 if (gasid != vcpu->arch.last_user_gasid) {
842 kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
843 for_each_possible_cpu(i)
844 cpu_context(i, user_mm) = 0;
845 vcpu->arch.last_user_gasid = gasid;
850 * Check if ASID is stale. This may happen due to a TLB flush request or
851 * a lazy user MM invalidation.
853 if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
854 asid_version_mask(cpu))
855 get_new_mmu_context(mm, cpu);
/*
 * Top-level guest entry for the trap & emulate backend: deliver pending
 * interrupts, prepare ASIDs/page tables, suspend the host mm, run the
 * guest via the low-level vcpu_run trampoline, then restore the host
 * process address space (we may have migrated CPUs while handling guest
 * exits).  Returns the low-level run result.
 */
858 static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
860 int cpu = smp_processor_id();
863 /* Check if we have any exceptions/interrupts pending */
864 kvm_mips_deliver_interrupts(vcpu,
865 kvm_read_c0_guest_cause(vcpu->arch.cop0));
867 kvm_trap_emul_vcpu_reenter(run, vcpu);
870 * We use user accessors to access guest memory, but we don't want to
871 * invoke Linux page faulting.
875 /* Disable hardware page table walking while in guest */
879 * While in guest context we're in the guest's address space, not the
880 * host process address space, so we need to be careful not to confuse
881 * e.g. cache management IPIs.
883 kvm_mips_suspend_mm(cpu);
885 r = vcpu->arch.vcpu_run(run, vcpu);
887 /* We may have migrated while handling guest exits */
888 cpu = smp_processor_id();
890 /* Restore normal Linux process memory map */
891 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
892 asid_version_mask(cpu)))
893 get_new_mmu_context(current->mm, cpu);
894 write_c0_entryhi(cpu_asid(cpu, current->mm));
895 TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
896 kvm_mips_resume_mm(cpu);
/*
 * Callback table binding the trap & emulate implementation into the
 * generic KVM/MIPS core: exception handlers, VCPU lifecycle hooks,
 * interrupt queueing, and one-reg accessors.
 */
905 static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
907 .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
908 .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
909 .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
910 .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
911 .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
912 .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
913 .handle_syscall = kvm_trap_emul_handle_syscall,
914 .handle_res_inst = kvm_trap_emul_handle_res_inst,
915 .handle_break = kvm_trap_emul_handle_break,
916 .handle_trap = kvm_trap_emul_handle_trap,
917 .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
918 .handle_fpe = kvm_trap_emul_handle_fpe,
919 .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
921 .vcpu_init = kvm_trap_emul_vcpu_init,
922 .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
923 .vcpu_setup = kvm_trap_emul_vcpu_setup,
924 .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
925 .queue_timer_int = kvm_mips_queue_timer_int_cb,
926 .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
927 .queue_io_int = kvm_mips_queue_io_int_cb,
928 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
929 .irq_deliver = kvm_mips_irq_deliver_cb,
930 .irq_clear = kvm_mips_irq_clear_cb,
931 .num_regs = kvm_trap_emul_num_regs,
932 .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
933 .get_one_reg = kvm_trap_emul_get_one_reg,
934 .set_one_reg = kvm_trap_emul_set_one_reg,
935 .vcpu_load = kvm_trap_emul_vcpu_load,
936 .vcpu_put = kvm_trap_emul_vcpu_put,
937 .vcpu_run = kvm_trap_emul_vcpu_run,
938 .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
941 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
943 *install_callbacks = &kvm_trap_emul_callbacks;