arch/mips/kvm/trap_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

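/*
 * kvm_trap_emul_gva_to_gpa_cb() - Translate a guest virtual address to a
 * guest physical address.
 * @gva:        Guest virtual address to translate.
 *
 * Host KSeg0/KSeg1 addresses map directly to physical addresses, and guest
 * KSeg0 addresses map via the guest physical base. Anything else cannot be
 * translated here and KVM_INVALID_ADDR is returned.
 */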
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);
        gva_t gkseg = KVM_GUEST_KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else if (gkseg == KVM_GUEST_KSEG0)
                gpa = KVM_GUEST_CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

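/*
 * kvm_mips_bad_load() - Emulate a guest load from an unmapped (MMIO) address.
 *
 * Fetches the faulting instruction (accounting for branch delay slots) and
 * emulates the load, exiting to userspace with KVM_EXIT_MMIO on success or
 * KVM_EXIT_INTERNAL_ERROR on failure. kvm_mips_bad_store() below is the
 * store-side counterpart.
 */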
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* A code fetch fault doesn't count as an MMIO */
        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the load */
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate load from MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                              struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the store */
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate store to MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
                               struct kvm_vcpu *vcpu, bool store)
{
        if (store)
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        else
                return kvm_mips_bad_load(cause, opc, run, vcpu);
}

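/*
 * kvm_trap_emul_handle_tlb_mod() - Handle a TLB Modified exception taken
 * while running the guest.
 *
 * Depending on the faulting address this relays the exception to the guest
 * (clean guest TLB entry), updates the shadow host TLB, or falls back to
 * MMIO store emulation for addresses not backed by RAM.
 */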
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        struct kvm_mips_tlb *tlb;
        unsigned long entryhi;
        int index;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                /*
                 * First find the mapping in the guest TLB. If the failure to
                 * write was due to the guest TLB, it should be up to the guest
                 * to handle it.
                 */
                entryhi = (badvaddr & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
                index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

                /*
                 * These should never happen.
                 * They would indicate stale host TLB entries.
                 */
                if (unlikely(index < 0)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                tlb = vcpu->arch.guest_tlb + index;
                if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /*
                 * Guest entry not dirty? That would explain the TLB modified
                 * exception. Relay that on to the guest so it can handle it.
                 */
                if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
                        return RESUME_GUEST;
                }

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                         true))
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else {
                /* host kernel addresses are all handled as MMIO */
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        }
}

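/*
 * kvm_trap_emul_handle_tlb_miss() - Common handler for guest TLB load/store
 * miss exceptions.
 * @store:      Whether the faulting access was a store.
 *
 * Commpage and guest KSeg0 faults are resolved by KVM itself; faults on
 * guest-mapped segments are either passed to the guest or used to refill the
 * shadow host TLB; KSeg0/KSeg1 accesses in guest kernel mode are treated as
 * MMIO. Anything else is reported to userspace as an internal error.
 */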
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:       Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}

static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;

                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                pud = pud_offset(pgd + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;

                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;

                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                        pmd_free(NULL, pmd);
                }
                pud_free(NULL, pud);
        }
        pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

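/*
 * kvm_trap_emul_vcpu_setup() - Initialise the guest CP0 state.
 *
 * Presents a 24Kc-like PRId (or a generic QEMU machine ID on MIPSr6) and
 * fills in the Config registers, IntCtl and EBase with the values a guest
 * kernel expects to find at boot.
 */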
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
        /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *slot)
{
        kvm_trap_emul_flush_shadow_all(kvm);
}

static u64 kvm_trap_emul_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
                         sizeof(kvm_trap_emul_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

        return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                *v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                *v = (long)kvm_read_c0_guest_prid(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                *v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                *v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                *v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                *v = (long)kvm_read_c0_guest_config4(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                *v = (long)kvm_read_c0_guest_config5(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                *v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                *v = (long)kvm_read_c0_guest_kscratch1(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                *v = (long)kvm_read_c0_guest_kscratch2(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                *v = (long)kvm_read_c0_guest_kscratch3(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                *v = (long)kvm_read_c0_guest_kscratch4(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                *v = (long)kvm_read_c0_guest_kscratch5(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                *v = (long)kvm_read_c0_guest_kscratch6(cop0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                kvm_write_c0_guest_prid(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                /* writes ignored */
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                kvm_write_c0_guest_kscratch1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                kvm_write_c0_guest_kscratch2(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                kvm_write_c0_guest_kscratch3(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                kvm_write_c0_guest_kscratch4(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                kvm_write_c0_guest_kscratch5(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                kvm_write_c0_guest_kscratch6(cop0, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

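/*
 * kvm_trap_emul_vcpu_load() - Prepare this CPU for running the VCPU.
 *
 * If we were already in guest context (e.g. after preemption), switch back
 * to the appropriate guest GVA address space (kernel or user mode) and
 * suspend the normal host mm on this CPU.
 */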
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /*
         * Were we in guest context? If so, restore the appropriate ASID based
         * on the mode of the Guest (Kernel/User).
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
                    asid_version_mask(cpu))
                        get_new_mmu_context(mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, mm));
                TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}

static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
                     asid_version_mask(cpu)))
                        get_new_mmu_context(current->mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, current->mm));
                TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}

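/*
 * kvm_trap_emul_check_requests() - Handle pending VCPU requests.
 * @reload_asid:        Reload the current mode's ASID after a TLB flush.
 *
 * Currently this only services KVM_REQ_TLB_FLUSH, which invalidates both the
 * kernel and user GVA page tables and their ASIDs on all CPUs, optionally
 * regenerating and loading the ASID for the current guest mode.
 */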
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
                                         bool reload_asid)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        int i;

        if (likely(!vcpu->requests))
                return;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /*
                 * Both kernel & user GVA mappings must be invalidated. The
                 * caller is just about to check whether the ASID is stale
                 * anyway so no need to reload it here.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
                        cpu_context(i, kern_mm) = 0;
                        cpu_context(i, user_mm) = 0;
                }

                /* Generate new ASID for current mode */
                if (reload_asid) {
                        mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                        get_new_mmu_context(mm, cpu);
                        htw_stop();
                        write_c0_entryhi(cpu_asid(cpu, mm));
                        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                        htw_start();
                }
        }
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:       VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
        /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
        WARN_ON_ONCE(irqs_disabled());

        /*
         * The caller is about to access the GVA space, so we set the mode to
         * force TLB flush requests to send an IPI, and also disable IRQs to
         * delay IPI handling until kvm_trap_emul_gva_lockless_end().
         */
        local_irq_disable();

        /*
         * Make sure the read of VCPU requests is not reordered ahead of the
         * write to vcpu->mode, or we could miss a TLB flush request while
         * the requester sees the VCPU as outside of guest mode and not needing
         * an IPI.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

        /*
         * If a TLB flush has been requested (potentially while
         * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
         * before accessing the GVA space, and be sure to reload the ASID if
         * necessary as it'll be immediately used.
         *
         * TLB flush requests after this check will trigger an IPI due to the
         * mode change above, which will be delayed due to IRQs disabled.
         */
        kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:       VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of GVA
         * accesses, or a TLB flush requester may not think it necessary to send
         * an IPI.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

        /*
         * Now that the access to GVA space is complete, it's safe for pending
         * TLB flush request IPIs to be handled (which indicates completion).
         */
        local_irq_enable();
}

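/*
 * kvm_trap_emul_vcpu_reenter() - Prepare to re-enter the guest.
 *
 * Services pending requests, lazily flushes the user GVA mappings when the
 * guest ASID has changed since the last guest user-mode execution, and
 * regenerates the host ASID for the current guest mode if it has gone stale.
 */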
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * No need to reload ASID, IRQs are disabled already so there's no rush,
         * and we'll check if we need to regenerate below anyway before
         * re-entering the guest.
         */
        kvm_trap_emul_check_requests(vcpu, cpu, false);

        if (KVM_GUEST_KERNEL_MODE(vcpu)) {
                mm = kern_mm;
        } else {
                mm = user_mm;

                /*
                 * Lazy host ASID regeneration / PT flush for guest user mode.
                 * If the guest ASID has changed since the last guest usermode
                 * execution, invalidate the stale TLB entries and flush GVA PT
                 * entries too.
                 */
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
                                cpu_context(i, user_mm) = 0;
                        vcpu->arch.last_user_gasid = gasid;
                }
        }

        /*
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
        if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
            asid_version_mask(cpu))
                get_new_mmu_context(mm, cpu);
}

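/*
 * kvm_trap_emul_vcpu_run() - Enter the guest and run until the next exit.
 *
 * Delivers any pending interrupts, switches to the guest address space with
 * page faulting and hardware page table walking disabled, runs the guest,
 * and then restores the normal host process memory map.
 */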
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /*
         * We use user accessors to access guest memory, but we don't want to
         * invoke Linux page faulting.
         */
        pagefault_disable();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu)))
                get_new_mmu_context(current->mm, cpu);
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
        kvm_mips_resume_mm(cpu);

        htw_start();

        pagefault_enable();

        return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
        .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}