2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * KVM/MIPS: Instruction/Exception emulation
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/ktime.h>
15 #include <linux/kvm_host.h>
16 #include <linux/vmalloc.h>
18 #include <linux/bootmem.h>
19 #include <linux/random.h>
21 #include <asm/cacheflush.h>
22 #include <asm/cacheops.h>
23 #include <asm/cpu-info.h>
24 #include <asm/mmu_context.h>
25 #include <asm/tlbflush.h>
29 #include <asm/r4kcache.h>
30 #define CONFIG_MIPS_MT
32 #include "interrupt.h"
38 * Compute the return address and do emulate branch simulation, if required.
39 * This function should be called only in branch delay slot active.
41 static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
52 kvm_err("%s: unaligned epc\n", __func__);
56 /* Read the instruction */
57 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
61 switch (insn.i_format.opcode) {
62 /* jr and jalr are in r_format format. */
64 switch (insn.r_format.func) {
66 arch->gprs[insn.r_format.rd] = epc + 8;
69 nextpc = arch->gprs[insn.r_format.rs];
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
82 switch (insn.i_format.rt) {
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
122 kvm_err("%s: DSP branch but not DSP ASE\n",
127 dspcontrol = rddsp(0x01);
129 if (dspcontrol >= 32)
130 epc = epc + 4 + (insn.i_format.simmediate << 2);
140 /* These are unconditional and in j_format. */
142 arch->gprs[31] = instpc + 8;
147 epc |= (insn.j_format.target << 2);
151 /* These are conditional and in i_format. */
154 if (arch->gprs[insn.i_format.rs] ==
155 arch->gprs[insn.i_format.rt])
156 epc = epc + 4 + (insn.i_format.simmediate << 2);
164 if (arch->gprs[insn.i_format.rs] !=
165 arch->gprs[insn.i_format.rt])
166 epc = epc + 4 + (insn.i_format.simmediate << 2);
172 case blez_op: /* POP06 */
173 #ifndef CONFIG_CPU_MIPSR6
174 case blezl_op: /* removed in R6 */
176 if (insn.i_format.rt != 0)
178 if ((long)arch->gprs[insn.i_format.rs] <= 0)
179 epc = epc + 4 + (insn.i_format.simmediate << 2);
185 case bgtz_op: /* POP07 */
186 #ifndef CONFIG_CPU_MIPSR6
187 case bgtzl_op: /* removed in R6 */
189 if (insn.i_format.rt != 0)
191 if ((long)arch->gprs[insn.i_format.rs] > 0)
192 epc = epc + 4 + (insn.i_format.simmediate << 2);
198 /* And now the FPA/cp1 branch instructions. */
200 kvm_err("%s: unsupported cop1_op\n", __func__);
203 #ifdef CONFIG_CPU_MIPSR6
204 /* R6 added the following compact branches with forbidden slots */
205 case blezl_op: /* POP26 */
206 case bgtzl_op: /* POP27 */
207 /* only rt == 0 isn't compact branch */
208 if (insn.i_format.rt != 0)
213 /* only rs == rt == 0 is reserved, rest are compact branches */
214 if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
219 /* only rs == 0 isn't compact branch */
220 if (insn.i_format.rs != 0)
225 * If we've hit an exception on the forbidden slot, then
226 * the branch must not have been taken.
233 /* Fall through - Compact branches not supported before R6 */
243 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
247 if (cause & CAUSEF_BD) {
248 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
256 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
262 * kvm_get_badinstr() - Get bad instruction encoding.
263 * @opc: Guest pointer to faulting instruction.
264 * @vcpu: KVM VCPU information.
266 * Gets the instruction encoding of the faulting instruction, using the saved
267 * BadInstr register value if it exists, otherwise falling back to reading guest
270 * Returns: The instruction encoding of the faulting instruction.
272 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
274 if (cpu_has_badinstr) {
275 *out = vcpu->arch.host_cp0_badinstr;
278 return kvm_get_inst(opc, vcpu, out);
283 * kvm_get_badinstrp() - Get bad prior instruction encoding.
284 * @opc: Guest pointer to prior faulting instruction.
285 * @vcpu: KVM VCPU information.
287 * Gets the instruction encoding of the prior faulting instruction (the branch
288 * containing the delay slot which faulted), using the saved BadInstrP register
289 * value if it exists, otherwise falling back to reading guest memory at @opc.
291 * Returns: The instruction encoding of the prior faulting instruction.
293 int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
295 if (cpu_has_badinstrp) {
296 *out = vcpu->arch.host_cp0_badinstrp;
299 return kvm_get_inst(opc, vcpu, out);
304 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
305 * @vcpu: Virtual CPU.
307 * Returns: 1 if the CP0_Count timer is disabled by either the guest
308 * CP0_Cause.DC bit or the count_ctl.DC bit.
309 * 0 otherwise (in which case CP0_Count timer is running).
311 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
313 struct mips_coproc *cop0 = vcpu->arch.cop0;
315 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
316 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
320 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
322 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
324 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
326 static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
331 now_ns = ktime_to_ns(now);
332 delta = now_ns + vcpu->arch.count_dyn_bias;
334 if (delta >= vcpu->arch.count_period) {
335 /* If delta is out of safe range the bias needs adjusting */
336 periods = div64_s64(now_ns, vcpu->arch.count_period);
337 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
338 /* Recalculate delta with new bias */
339 delta = now_ns + vcpu->arch.count_dyn_bias;
343 * We've ensured that:
344 * delta < count_period
346 * Therefore the intermediate delta*count_hz will never overflow since
347 * at the boundary condition:
348 * delta = count_period
349 * delta = NSEC_PER_SEC * 2^32 / count_hz
350 * delta * count_hz = NSEC_PER_SEC * 2^32
352 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
356 * kvm_mips_count_time() - Get effective current time.
357 * @vcpu: Virtual CPU.
359 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
360 * except when the master disable bit is set in count_ctl, in which case it is
361 * count_resume, i.e. the time that the count was disabled.
363 * Returns: Effective monotonic ktime for CP0_Count.
365 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
367 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
368 return vcpu->arch.count_resume;
374 * kvm_mips_read_count_running() - Read the current count value as if running.
375 * @vcpu: Virtual CPU.
376 * @now: Kernel time to read CP0_Count at.
378 * Returns the current guest CP0_Count register at time @now and handles if the
379 * timer interrupt is pending and hasn't been handled yet.
381 * Returns: The current value of the guest CP0_Count register.
383 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
385 struct mips_coproc *cop0 = vcpu->arch.cop0;
386 ktime_t expires, threshold;
390 /* Calculate the biased and scaled guest CP0_Count */
391 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
392 compare = kvm_read_c0_guest_compare(cop0);
395 * Find whether CP0_Count has reached the closest timer interrupt. If
396 * not, we shouldn't inject it.
398 if ((s32)(count - compare) < 0)
402 * The CP0_Count we're going to return has already reached the closest
403 * timer interrupt. Quickly check if it really is a new interrupt by
404 * looking at whether the interval until the hrtimer expiry time is
405 * less than 1/4 of the timer period.
407 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
408 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
409 if (ktime_before(expires, threshold)) {
411 * Cancel it while we handle it so there's no chance of
412 * interference with the timeout handler.
414 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
416 /* Nothing should be waiting on the timeout */
417 kvm_mips_callbacks->queue_timer_int(vcpu);
420 * Restart the timer if it was running based on the expiry time
421 * we read, so that we don't push it back 2 periods.
424 expires = ktime_add_ns(expires,
425 vcpu->arch.count_period);
426 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
435 * kvm_mips_read_count() - Read the current count value.
436 * @vcpu: Virtual CPU.
438 * Read the current guest CP0_Count value, taking into account whether the timer
441 * Returns: The current guest CP0_Count value.
443 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
445 struct mips_coproc *cop0 = vcpu->arch.cop0;
447 /* If count disabled just read static copy of count */
448 if (kvm_mips_count_disabled(vcpu))
449 return kvm_read_c0_guest_count(cop0);
451 return kvm_mips_read_count_running(vcpu, ktime_get());
455 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
456 * @vcpu: Virtual CPU.
457 * @count: Output pointer for CP0_Count value at point of freeze.
459 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
460 * at the point it was frozen. It is guaranteed that any pending interrupts at
461 * the point it was frozen are handled, and none after that point.
463 * This is useful where the time/CP0_Count is needed in the calculation of the
466 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
468 * Returns: The ktime at the point of freeze.
470 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
474 /* stop hrtimer before finding time */
475 hrtimer_cancel(&vcpu->arch.comparecount_timer);
478 /* find count at this point and handle pending hrtimer */
479 *count = kvm_mips_read_count_running(vcpu, now);
485 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
486 * @vcpu: Virtual CPU.
487 * @now: ktime at point of resume.
488 * @count: CP0_Count at point of resume.
490 * Resumes the timer and updates the timer expiry based on @now and @count.
491 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
492 * parameters need to be changed.
494 * It is guaranteed that a timer interrupt immediately after resume will be
495 * handled, but not if CP_Compare is exactly at @count. That case is already
496 * handled by kvm_mips_freeze_timer().
498 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
500 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
501 ktime_t now, u32 count)
503 struct mips_coproc *cop0 = vcpu->arch.cop0;
508 /* Calculate timeout (wrap 0 to 2^32) */
509 compare = kvm_read_c0_guest_compare(cop0);
510 delta = (u64)(u32)(compare - count - 1) + 1;
511 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
512 expire = ktime_add_ns(now, delta);
514 /* Update hrtimer to use new timeout */
515 hrtimer_cancel(&vcpu->arch.comparecount_timer);
516 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
520 * kvm_mips_write_count() - Modify the count and update timer.
521 * @vcpu: Virtual CPU.
522 * @count: Guest CP0_Count value to set.
524 * Sets the CP0_Count value and updates the timer accordingly.
526 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
528 struct mips_coproc *cop0 = vcpu->arch.cop0;
532 now = kvm_mips_count_time(vcpu);
533 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
535 if (kvm_mips_count_disabled(vcpu))
536 /* The timer's disabled, adjust the static count */
537 kvm_write_c0_guest_count(cop0, count);
540 kvm_mips_resume_hrtimer(vcpu, now, count);
544 * kvm_mips_init_count() - Initialise timer.
545 * @vcpu: Virtual CPU.
547 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
548 * it going if it's enabled.
550 void kvm_mips_init_count(struct kvm_vcpu *vcpu)
553 vcpu->arch.count_hz = 100*1000*1000;
554 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
555 vcpu->arch.count_hz);
556 vcpu->arch.count_dyn_bias = 0;
559 kvm_mips_write_count(vcpu, 0);
563 * kvm_mips_set_count_hz() - Update the frequency of the timer.
564 * @vcpu: Virtual CPU.
565 * @count_hz: Frequency of CP0_Count timer in Hz.
567 * Change the frequency of the CP0_Count timer. This is done atomically so that
568 * CP0_Count is continuous and no timer interrupt is lost.
570 * Returns: -EINVAL if @count_hz is out of range.
573 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
575 struct mips_coproc *cop0 = vcpu->arch.cop0;
580 /* ensure the frequency is in a sensible range... */
581 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
583 /* ... and has actually changed */
584 if (vcpu->arch.count_hz == count_hz)
587 /* Safely freeze timer so we can keep it continuous */
588 dc = kvm_mips_count_disabled(vcpu);
590 now = kvm_mips_count_time(vcpu);
591 count = kvm_read_c0_guest_count(cop0);
593 now = kvm_mips_freeze_hrtimer(vcpu, &count);
596 /* Update the frequency */
597 vcpu->arch.count_hz = count_hz;
598 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
599 vcpu->arch.count_dyn_bias = 0;
601 /* Calculate adjusted bias so dynamic count is unchanged */
602 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
604 /* Update and resume hrtimer */
606 kvm_mips_resume_hrtimer(vcpu, now, count);
611 * kvm_mips_write_compare() - Modify compare and update timer.
612 * @vcpu: Virtual CPU.
613 * @compare: New CP0_Compare value.
614 * @ack: Whether to acknowledge timer interrupt.
616 * Update CP0_Compare to a new value and update the timeout.
617 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
618 * any pending timer interrupt is preserved.
620 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
622 struct mips_coproc *cop0 = vcpu->arch.cop0;
624 u32 old_compare = kvm_read_c0_guest_compare(cop0);
628 /* if unchanged, must just be an ack */
629 if (old_compare == compare) {
632 kvm_mips_callbacks->dequeue_timer_int(vcpu);
633 kvm_write_c0_guest_compare(cop0, compare);
637 /* freeze_hrtimer() takes care of timer interrupts <= count */
638 dc = kvm_mips_count_disabled(vcpu);
640 now = kvm_mips_freeze_hrtimer(vcpu, &count);
643 kvm_mips_callbacks->dequeue_timer_int(vcpu);
645 kvm_write_c0_guest_compare(cop0, compare);
647 /* resume_hrtimer() takes care of timer interrupts > count */
649 kvm_mips_resume_hrtimer(vcpu, now, count);
653 * kvm_mips_count_disable() - Disable count.
654 * @vcpu: Virtual CPU.
656 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
657 * time will be handled but not after.
659 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
660 * count_ctl.DC has been set (count disabled).
662 * Returns: The time that the timer was stopped.
664 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
666 struct mips_coproc *cop0 = vcpu->arch.cop0;
671 hrtimer_cancel(&vcpu->arch.comparecount_timer);
673 /* Set the static count from the dynamic count, handling pending TI */
675 count = kvm_mips_read_count_running(vcpu, now);
676 kvm_write_c0_guest_count(cop0, count);
682 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
683 * @vcpu: Virtual CPU.
685 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
686 * before the final stop time will be handled if the timer isn't disabled by
687 * count_ctl.DC, but not after.
689 * Assumes CP0_Cause.DC is clear (count enabled).
691 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
693 struct mips_coproc *cop0 = vcpu->arch.cop0;
695 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
696 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
697 kvm_mips_count_disable(vcpu);
701 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
702 * @vcpu: Virtual CPU.
704 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
705 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
706 * potentially before even returning, so the caller should be careful with
707 * ordering of CP0_Cause modifications so as not to lose it.
709 * Assumes CP0_Cause.DC is set (count disabled).
711 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
713 struct mips_coproc *cop0 = vcpu->arch.cop0;
716 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
719 * Set the dynamic count to match the static count.
720 * This starts the hrtimer if count_ctl.DC allows it.
721 * Otherwise it conveniently updates the biases.
723 count = kvm_read_c0_guest_count(cop0);
724 kvm_mips_write_count(vcpu, count);
728 * kvm_mips_set_count_ctl() - Update the count control KVM register.
729 * @vcpu: Virtual CPU.
730 * @count_ctl: Count control register new value.
732 * Set the count control KVM register. The timer is updated accordingly.
734 * Returns: -EINVAL if reserved bits are set.
737 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
739 struct mips_coproc *cop0 = vcpu->arch.cop0;
740 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
745 /* Only allow defined bits to be changed */
746 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
749 /* Apply new value */
750 vcpu->arch.count_ctl = count_ctl;
752 /* Master CP0_Count disable */
753 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
754 /* Is CP0_Cause.DC already disabling CP0_Count? */
755 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
756 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
757 /* Just record the current time */
758 vcpu->arch.count_resume = ktime_get();
759 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
760 /* disable timer and record current time */
761 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
764 * Calculate timeout relative to static count at resume
765 * time (wrap 0 to 2^32).
767 count = kvm_read_c0_guest_count(cop0);
768 compare = kvm_read_c0_guest_compare(cop0);
769 delta = (u64)(u32)(compare - count - 1) + 1;
770 delta = div_u64(delta * NSEC_PER_SEC,
771 vcpu->arch.count_hz);
772 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
774 /* Handle pending interrupt */
776 if (ktime_compare(now, expire) >= 0)
777 /* Nothing should be waiting on the timeout */
778 kvm_mips_callbacks->queue_timer_int(vcpu);
780 /* Resume hrtimer without changing bias */
781 count = kvm_mips_read_count_running(vcpu, now);
782 kvm_mips_resume_hrtimer(vcpu, now, count);
790 * kvm_mips_set_count_resume() - Update the count resume KVM register.
791 * @vcpu: Virtual CPU.
792 * @count_resume: Count resume register new value.
794 * Set the count resume KVM register.
796 * Returns: -EINVAL if out of valid range (0..now).
799 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
802 * It doesn't make sense for the resume time to be in the future, as it
803 * would be possible for the next interrupt to be more than a full
804 * period in the future.
806 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
809 vcpu->arch.count_resume = ns_to_ktime(count_resume);
814 * kvm_mips_count_timeout() - Push timer forward on timeout.
815 * @vcpu: Virtual CPU.
817 * Handle an hrtimer event by push the hrtimer forward a period.
819 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
821 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
823 /* Add the Count period to the current expiry time */
824 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
825 vcpu->arch.count_period);
826 return HRTIMER_RESTART;
829 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
831 struct mips_coproc *cop0 = vcpu->arch.cop0;
832 enum emulation_result er = EMULATE_DONE;
834 if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
835 kvm_clear_c0_guest_status(cop0, ST0_ERL);
836 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
837 } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
838 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
839 kvm_read_c0_guest_epc(cop0));
840 kvm_clear_c0_guest_status(cop0, ST0_EXL);
841 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
844 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
852 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
854 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
855 vcpu->arch.pending_exceptions);
857 ++vcpu->stat.wait_exits;
858 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
859 if (!vcpu->arch.pending_exceptions) {
861 kvm_vcpu_block(vcpu);
864 * We we are runnable, then definitely go off to user space to
865 * check if any I/O interrupts are pending.
867 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
868 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
869 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
877 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
878 * we can catch this, if things ever change
880 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
882 struct mips_coproc *cop0 = vcpu->arch.cop0;
883 unsigned long pc = vcpu->arch.pc;
885 kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
890 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
891 * @vcpu: VCPU with changed mappings.
892 * @tlb: TLB entry being removed.
894 * This is called to indicate a single change in guest MMU mappings, so that we
895 * can arrange TLB flushes on this and other CPUs.
897 static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
898 struct kvm_mips_tlb *tlb)
900 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
901 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
905 /* No need to flush for entries which are already invalid */
906 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
908 /* Don't touch host kernel page tables or TLB mappings */
909 if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
911 /* User address space doesn't need flushing for KSeg2/3 changes */
912 user = tlb->tlb_hi < KVM_GUEST_KSEG0;
916 /* Invalidate page table entries */
917 kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
920 * Probe the shadow host TLB for the entry being overwritten, if one
921 * matches, invalidate it
923 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
925 /* Invalidate the whole ASID on other CPUs */
926 cpu = smp_processor_id();
927 for_each_possible_cpu(i) {
931 cpu_context(i, user_mm) = 0;
932 cpu_context(i, kern_mm) = 0;
938 /* Write Guest TLB Entry @ Index */
939 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
941 struct mips_coproc *cop0 = vcpu->arch.cop0;
942 int index = kvm_read_c0_guest_index(cop0);
943 struct kvm_mips_tlb *tlb = NULL;
944 unsigned long pc = vcpu->arch.pc;
946 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
947 kvm_debug("%s: illegal index: %d\n", __func__, index);
948 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
949 pc, index, kvm_read_c0_guest_entryhi(cop0),
950 kvm_read_c0_guest_entrylo0(cop0),
951 kvm_read_c0_guest_entrylo1(cop0),
952 kvm_read_c0_guest_pagemask(cop0));
953 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
956 tlb = &vcpu->arch.guest_tlb[index];
958 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
960 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
961 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
962 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
963 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
965 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
966 pc, index, kvm_read_c0_guest_entryhi(cop0),
967 kvm_read_c0_guest_entrylo0(cop0),
968 kvm_read_c0_guest_entrylo1(cop0),
969 kvm_read_c0_guest_pagemask(cop0));
974 /* Write Guest TLB Entry @ Random Index */
975 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
977 struct mips_coproc *cop0 = vcpu->arch.cop0;
978 struct kvm_mips_tlb *tlb = NULL;
979 unsigned long pc = vcpu->arch.pc;
982 get_random_bytes(&index, sizeof(index));
983 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
985 tlb = &vcpu->arch.guest_tlb[index];
987 kvm_mips_invalidate_guest_tlb(vcpu, tlb);
989 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
990 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
991 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
992 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
994 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
995 pc, index, kvm_read_c0_guest_entryhi(cop0),
996 kvm_read_c0_guest_entrylo0(cop0),
997 kvm_read_c0_guest_entrylo1(cop0));
1002 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
1004 struct mips_coproc *cop0 = vcpu->arch.cop0;
1005 long entryhi = kvm_read_c0_guest_entryhi(cop0);
1006 unsigned long pc = vcpu->arch.pc;
1009 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1011 kvm_write_c0_guest_index(cop0, index);
1013 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
1016 return EMULATE_DONE;
1020 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
1021 * @vcpu: Virtual CPU.
1023 * Finds the mask of bits which are writable in the guest's Config1 CP0
1024 * register, by userland (currently read-only to the guest).
1026 unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
1028 unsigned int mask = 0;
1030 /* Permit FPU to be present if FPU is supported */
1031 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
1032 mask |= MIPS_CONF1_FP;
1038 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
1039 * @vcpu: Virtual CPU.
1041 * Finds the mask of bits which are writable in the guest's Config3 CP0
1042 * register, by userland (currently read-only to the guest).
1044 unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
1046 /* Config4 and ULRI are optional */
1047 unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
1049 /* Permit MSA to be present if MSA is supported */
1050 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
1051 mask |= MIPS_CONF3_MSA;
1057 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
1058 * @vcpu: Virtual CPU.
1060 * Finds the mask of bits which are writable in the guest's Config4 CP0
1061 * register, by userland (currently read-only to the guest).
1063 unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
1065 /* Config5 is optional */
1066 unsigned int mask = MIPS_CONF_M;
1069 mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
1075 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
1076 * @vcpu: Virtual CPU.
1078 * Finds the mask of bits which are writable in the guest's Config5 CP0
1079 * register, by the guest itself.
1081 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
1083 unsigned int mask = 0;
1085 /* Permit MSAEn changes if MSA supported and enabled */
1086 if (kvm_mips_guest_has_msa(&vcpu->arch))
1087 mask |= MIPS_CONF5_MSAEN;
1090 * Permit guest FPU mode changes if FPU is enabled and the relevant
1091 * feature exists according to FIR register.
1093 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1095 mask |= MIPS_CONF5_FRE;
1096 /* We don't support UFR or UFE */
1102 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
1103 u32 *opc, u32 cause,
1104 struct kvm_run *run,
1105 struct kvm_vcpu *vcpu)
1107 struct mips_coproc *cop0 = vcpu->arch.cop0;
1108 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
1109 enum emulation_result er = EMULATE_DONE;
1111 unsigned long curr_pc;
1115 * Update PC and hold onto current PC in case there is
1116 * an error and we want to rollback the PC
1118 curr_pc = vcpu->arch.pc;
1119 er = update_pc(vcpu, cause);
1120 if (er == EMULATE_FAIL)
1123 if (inst.co_format.co) {
1124 switch (inst.co_format.func) {
1125 case tlbr_op: /* Read indexed TLB entry */
1126 er = kvm_mips_emul_tlbr(vcpu);
1128 case tlbwi_op: /* Write indexed */
1129 er = kvm_mips_emul_tlbwi(vcpu);
1131 case tlbwr_op: /* Write random */
1132 er = kvm_mips_emul_tlbwr(vcpu);
1134 case tlbp_op: /* TLB Probe */
1135 er = kvm_mips_emul_tlbp(vcpu);
1138 kvm_err("!!!COP0_RFE!!!\n");
1141 er = kvm_mips_emul_eret(vcpu);
1142 goto dont_update_pc;
1144 er = kvm_mips_emul_wait(vcpu);
1147 er = kvm_mips_emul_hypcall(vcpu, inst);
1151 rt = inst.c0r_format.rt;
1152 rd = inst.c0r_format.rd;
1153 sel = inst.c0r_format.sel;
1155 switch (inst.c0r_format.rs) {
1157 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1158 cop0->stat[rd][sel]++;
1161 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1162 vcpu->arch.gprs[rt] =
1163 (s32)kvm_mips_read_count(vcpu);
1164 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1165 vcpu->arch.gprs[rt] = 0x0;
1166 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1167 kvm_mips_trans_mfc0(inst, opc, vcpu);
1170 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
1172 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1173 kvm_mips_trans_mfc0(inst, opc, vcpu);
1177 trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
1178 KVM_TRACE_COP0(rd, sel),
1179 vcpu->arch.gprs[rt]);
1183 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1185 trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
1186 KVM_TRACE_COP0(rd, sel),
1187 vcpu->arch.gprs[rt]);
1191 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1192 cop0->stat[rd][sel]++;
1194 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
1195 KVM_TRACE_COP0(rd, sel),
1196 vcpu->arch.gprs[rt]);
1198 if ((rd == MIPS_CP0_TLB_INDEX)
1199 && (vcpu->arch.gprs[rt] >=
1200 KVM_MIPS_GUEST_TLB_SIZE)) {
1201 kvm_err("Invalid TLB Index: %ld",
1202 vcpu->arch.gprs[rt]);
1206 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1208 * Preserve core number, and keep the exception
1209 * base in guest KSeg0.
1211 kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
1212 vcpu->arch.gprs[rt]);
1213 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1215 vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
1216 if (((kvm_read_c0_guest_entryhi(cop0) &
1217 KVM_ENTRYHI_ASID) != nasid)) {
1218 trace_kvm_asid_change(vcpu,
1219 kvm_read_c0_guest_entryhi(cop0)
1224 * Flush entries from the GVA page
1226 * Guest user page table will get
1227 * flushed lazily on re-entry to guest
1228 * user if the guest ASID actually
1231 kvm_mips_flush_gva_pt(kern_mm->pgd,
1235 * Regenerate/invalidate kernel MMU
1237 * The user MMU context will be
1238 * regenerated lazily on re-entry to
1239 * guest user if the guest ASID actually
1243 cpu = smp_processor_id();
1244 get_new_mmu_context(kern_mm, cpu);
1245 for_each_possible_cpu(i)
1247 cpu_context(i, kern_mm) = 0;
1250 kvm_write_c0_guest_entryhi(cop0,
1251 vcpu->arch.gprs[rt]);
1253 /* Are we writing to COUNT */
1254 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1255 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1257 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1258 /* If we are writing to COMPARE */
1259 /* Clear pending timer interrupt, if any */
1260 kvm_mips_write_compare(vcpu,
1261 vcpu->arch.gprs[rt],
1263 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1264 unsigned int old_val, val, change;
1266 old_val = kvm_read_c0_guest_status(cop0);
1267 val = vcpu->arch.gprs[rt];
1268 change = val ^ old_val;
1270 /* Make sure that the NMI bit is never set */
1274 * Don't allow CU1 or FR to be set unless FPU
1275 * capability enabled and exists in guest
1278 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1279 val &= ~(ST0_CU1 | ST0_FR);
1282 * Also don't allow FR to be set if host doesn't
1285 if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1289 /* Handle changes in FPU mode */
1293 * FPU and Vector register state is made
1294 * UNPREDICTABLE by a change of FR, so don't
1295 * even bother saving it.
1297 if (change & ST0_FR)
1301 * If MSA state is already live, it is undefined
1302 * how it interacts with FR=0 FPU state, and we
1303 * don't want to hit reserved instruction
1304 * exceptions trying to save the MSA state later
1305 * when CU=1 && FR=1, so play it safe and save
1308 if (change & ST0_CU1 && !(val & ST0_FR) &&
1309 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1313 * Propagate CU1 (FPU enable) changes
1314 * immediately if the FPU context is already
1315 * loaded. When disabling we leave the context
1316 * loaded so it can be quickly enabled again in
1319 if (change & ST0_CU1 &&
1320 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1321 change_c0_status(ST0_CU1, val);
1325 kvm_write_c0_guest_status(cop0, val);
1327 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1329 * If FPU present, we need CU1/FR bits to take
1330 * effect fairly soon.
1332 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1333 kvm_mips_trans_mtc0(inst, opc, vcpu);
1335 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1336 unsigned int old_val, val, change, wrmask;
1338 old_val = kvm_read_c0_guest_config5(cop0);
1339 val = vcpu->arch.gprs[rt];
1341 /* Only a few bits are writable in Config5 */
1342 wrmask = kvm_mips_config5_wrmask(vcpu);
1343 change = (val ^ old_val) & wrmask;
1344 val = old_val ^ change;
1347 /* Handle changes in FPU/MSA modes */
1351 * Propagate FRE changes immediately if the FPU
1352 * context is already loaded.
1354 if (change & MIPS_CONF5_FRE &&
1355 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1356 change_c0_config5(MIPS_CONF5_FRE, val);
1359 * Propagate MSAEn changes immediately if the
1360 * MSA context is already loaded. When disabling
1361 * we leave the context loaded so it can be
1362 * quickly enabled again in the near future.
1364 if (change & MIPS_CONF5_MSAEN &&
1365 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1366 change_c0_config5(MIPS_CONF5_MSAEN,
1371 kvm_write_c0_guest_config5(cop0, val);
1372 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1373 u32 old_cause, new_cause;
1375 old_cause = kvm_read_c0_guest_cause(cop0);
1376 new_cause = vcpu->arch.gprs[rt];
1377 /* Update R/W bits */
1378 kvm_change_c0_guest_cause(cop0, 0x08800300,
1380 /* DC bit enabling/disabling timer? */
1381 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1382 if (new_cause & CAUSEF_DC)
1383 kvm_mips_count_disable_cause(vcpu);
1385 kvm_mips_count_enable_cause(vcpu);
1387 } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
1388 u32 mask = MIPS_HWRENA_CPUNUM |
1389 MIPS_HWRENA_SYNCISTEP |
1393 if (kvm_read_c0_guest_config3(cop0) &
1395 mask |= MIPS_HWRENA_ULR;
1396 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
1398 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1399 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1400 kvm_mips_trans_mtc0(inst, opc, vcpu);
1406 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1407 vcpu->arch.pc, rt, rd, sel);
1408 trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
1409 KVM_TRACE_COP0(rd, sel),
1410 vcpu->arch.gprs[rt]);
1415 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1416 cop0->stat[MIPS_CP0_STATUS][0]++;
1419 vcpu->arch.gprs[rt] =
1420 kvm_read_c0_guest_status(cop0);
1422 if (inst.mfmc0_format.sc) {
1423 kvm_debug("[%#lx] mfmc0_op: EI\n",
1425 kvm_set_c0_guest_status(cop0, ST0_IE);
1427 kvm_debug("[%#lx] mfmc0_op: DI\n",
1429 kvm_clear_c0_guest_status(cop0, ST0_IE);
1436 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1438 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1440 * We don't support any shadow register sets, so
1441 * SRSCtl[PSS] == SRSCtl[CSS] = 0
1447 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1448 vcpu->arch.gprs[rt]);
1449 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1453 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1454 vcpu->arch.pc, inst.c0r_format.rs);
1461 /* Rollback PC only if emulation was unsuccessful */
1462 if (er == EMULATE_FAIL)
1463 vcpu->arch.pc = curr_pc;
1467 * This is for special instructions whose emulation
1468 * updates the PC, so do not overwrite the PC under
1475 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1477 struct kvm_run *run,
1478 struct kvm_vcpu *vcpu)
1480 enum emulation_result er = EMULATE_DO_MMIO;
1483 void *data = run->mmio.data;
1484 unsigned long curr_pc;
1487 * Update PC and hold onto current PC in case there is
1488 * an error and we want to rollback the PC
1490 curr_pc = vcpu->arch.pc;
1491 er = update_pc(vcpu, cause);
1492 if (er == EMULATE_FAIL)
1495 rt = inst.i_format.rt;
1497 switch (inst.i_format.opcode) {
1500 if (bytes > sizeof(run->mmio.data)) {
1501 kvm_err("%s: bad MMIO length: %d\n", __func__,
1504 run->mmio.phys_addr =
1505 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1507 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1511 run->mmio.len = bytes;
1512 run->mmio.is_write = 1;
1513 vcpu->mmio_needed = 1;
1514 vcpu->mmio_is_write = 1;
1515 *(u8 *) data = vcpu->arch.gprs[rt];
1516 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1517 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1524 if (bytes > sizeof(run->mmio.data)) {
1525 kvm_err("%s: bad MMIO length: %d\n", __func__,
1528 run->mmio.phys_addr =
1529 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1531 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1536 run->mmio.len = bytes;
1537 run->mmio.is_write = 1;
1538 vcpu->mmio_needed = 1;
1539 vcpu->mmio_is_write = 1;
1540 *(u32 *) data = vcpu->arch.gprs[rt];
1542 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1543 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1544 vcpu->arch.gprs[rt], *(u32 *) data);
1549 if (bytes > sizeof(run->mmio.data)) {
1550 kvm_err("%s: bad MMIO length: %d\n", __func__,
1553 run->mmio.phys_addr =
1554 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1556 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1561 run->mmio.len = bytes;
1562 run->mmio.is_write = 1;
1563 vcpu->mmio_needed = 1;
1564 vcpu->mmio_is_write = 1;
1565 *(u16 *) data = vcpu->arch.gprs[rt];
1567 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1568 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1569 vcpu->arch.gprs[rt], *(u32 *) data);
1573 kvm_err("Store not yet supported (inst=0x%08x)\n",
1579 /* Rollback PC if emulation was unsuccessful */
1580 if (er == EMULATE_FAIL)
1581 vcpu->arch.pc = curr_pc;
1586 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1587 u32 cause, struct kvm_run *run,
1588 struct kvm_vcpu *vcpu)
1590 enum emulation_result er = EMULATE_DO_MMIO;
1591 unsigned long curr_pc;
1595 rt = inst.i_format.rt;
1596 op = inst.i_format.opcode;
1599 * Find the resume PC now while we have safe and easy access to the
1600 * prior branch instruction, and save it for
1601 * kvm_mips_complete_mmio_load() to restore later.
1603 curr_pc = vcpu->arch.pc;
1604 er = update_pc(vcpu, cause);
1605 if (er == EMULATE_FAIL)
1607 vcpu->arch.io_pc = vcpu->arch.pc;
1608 vcpu->arch.pc = curr_pc;
1610 vcpu->arch.io_gpr = rt;
1615 if (bytes > sizeof(run->mmio.data)) {
1616 kvm_err("%s: bad MMIO length: %d\n", __func__,
1621 run->mmio.phys_addr =
1622 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1624 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1629 run->mmio.len = bytes;
1630 run->mmio.is_write = 0;
1631 vcpu->mmio_needed = 1;
1632 vcpu->mmio_is_write = 0;
1638 if (bytes > sizeof(run->mmio.data)) {
1639 kvm_err("%s: bad MMIO length: %d\n", __func__,
1644 run->mmio.phys_addr =
1645 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1647 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1652 run->mmio.len = bytes;
1653 run->mmio.is_write = 0;
1654 vcpu->mmio_needed = 1;
1655 vcpu->mmio_is_write = 0;
1658 vcpu->mmio_needed = 2;
1660 vcpu->mmio_needed = 1;
1667 if (bytes > sizeof(run->mmio.data)) {
1668 kvm_err("%s: bad MMIO length: %d\n", __func__,
1673 run->mmio.phys_addr =
1674 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1676 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1681 run->mmio.len = bytes;
1682 run->mmio.is_write = 0;
1683 vcpu->mmio_is_write = 0;
1686 vcpu->mmio_needed = 2;
1688 vcpu->mmio_needed = 1;
1693 kvm_err("Load not yet supported (inst=0x%08x)\n",
1702 static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
1703 unsigned long curr_pc,
1705 struct kvm_run *run,
1706 struct kvm_vcpu *vcpu,
1712 /* Carefully attempt the cache operation */
1713 kvm_trap_emul_gva_lockless_begin(vcpu);
1715 kvm_trap_emul_gva_lockless_end(vcpu);
1718 return EMULATE_DONE;
1721 * Try to handle the fault and retry, maybe we just raced with a
1724 switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
1727 /* bad virtual or physical address */
1728 return EMULATE_FAIL;
1730 /* no matching guest TLB */
1731 vcpu->arch.host_cp0_badvaddr = addr;
1732 vcpu->arch.pc = curr_pc;
1733 kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
1734 return EMULATE_EXCEPT;
1735 case KVM_MIPS_TLBINV:
1736 /* invalid matching guest TLB */
1737 vcpu->arch.host_cp0_badvaddr = addr;
1738 vcpu->arch.pc = curr_pc;
1739 kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
1740 return EMULATE_EXCEPT;
1747 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1748 u32 *opc, u32 cause,
1749 struct kvm_run *run,
1750 struct kvm_vcpu *vcpu)
1752 enum emulation_result er = EMULATE_DONE;
1753 u32 cache, op_inst, op, base;
1755 struct kvm_vcpu_arch *arch = &vcpu->arch;
1757 unsigned long curr_pc;
1760 * Update PC and hold onto current PC in case there is
1761 * an error and we want to rollback the PC
1763 curr_pc = vcpu->arch.pc;
1764 er = update_pc(vcpu, cause);
1765 if (er == EMULATE_FAIL)
1768 base = inst.i_format.rs;
1769 op_inst = inst.i_format.rt;
1770 if (cpu_has_mips_r6)
1771 offset = inst.spec3_format.simmediate;
1773 offset = inst.i_format.simmediate;
1774 cache = op_inst & CacheOp_Cache;
1775 op = op_inst & CacheOp_Op;
1777 va = arch->gprs[base] + offset;
1779 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1780 cache, op, base, arch->gprs[base], offset);
1783 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1784 * invalidate the caches entirely by stepping through all the
1787 if (op == Index_Writeback_Inv) {
1788 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1789 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1790 arch->gprs[base], offset);
1792 if (cache == Cache_D)
1794 else if (cache == Cache_I)
1797 kvm_err("%s: unsupported CACHE INDEX operation\n",
1799 return EMULATE_FAIL;
1802 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1803 kvm_mips_trans_cache_index(inst, opc, vcpu);
1808 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1809 if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1811 * Perform the dcache part of icache synchronisation on the
1814 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
1815 curr_pc, va, run, vcpu, cause);
1816 if (er != EMULATE_DONE)
1818 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1820 * Replace the CACHE instruction, with a SYNCI, not the same,
1823 kvm_mips_trans_cache_va(inst, opc, vcpu);
1825 } else if (op_inst == Hit_Invalidate_I) {
1826 /* Perform the icache synchronisation on the guest's behalf */
1827 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
1828 curr_pc, va, run, vcpu, cause);
1829 if (er != EMULATE_DONE)
1831 er = kvm_mips_guest_cache_op(protected_flush_icache_line,
1832 curr_pc, va, run, vcpu, cause);
1833 if (er != EMULATE_DONE)
1836 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1837 /* Replace the CACHE instruction, with a SYNCI */
1838 kvm_mips_trans_cache_va(inst, opc, vcpu);
1841 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1842 cache, op, base, arch->gprs[base], offset);
1847 /* Rollback PC only if emulation was unsuccessful */
1848 if (er == EMULATE_FAIL)
1849 vcpu->arch.pc = curr_pc;
1850 /* Guest exception needs guest to resume */
1851 if (er == EMULATE_EXCEPT)
1857 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1858 struct kvm_run *run,
1859 struct kvm_vcpu *vcpu)
1861 union mips_instruction inst;
1862 enum emulation_result er = EMULATE_DONE;
1865 /* Fetch the instruction. */
1866 if (cause & CAUSEF_BD)
1868 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1870 return EMULATE_FAIL;
1872 switch (inst.r_format.opcode) {
1874 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1879 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1886 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1889 #ifndef CONFIG_CPU_MIPSR6
1891 ++vcpu->stat.cache_exits;
1892 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1893 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1897 switch (inst.spec3_format.func) {
1899 ++vcpu->stat.cache_exits;
1900 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1901 er = kvm_mips_emulate_cache(inst, opc, cause, run,
1912 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1914 kvm_arch_vcpu_dump_regs(vcpu);
1923 * kvm_mips_guest_exception_base() - Find guest exception vector base address.
1925 * Returns: The base address of the current guest exception vector, taking
1926 * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
1928 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
1930 struct mips_coproc *cop0 = vcpu->arch.cop0;
1932 if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
1933 return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
1935 return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
1938 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1940 struct kvm_run *run,
1941 struct kvm_vcpu *vcpu)
1943 struct mips_coproc *cop0 = vcpu->arch.cop0;
1944 struct kvm_vcpu_arch *arch = &vcpu->arch;
1945 enum emulation_result er = EMULATE_DONE;
1947 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1949 kvm_write_c0_guest_epc(cop0, arch->pc);
1950 kvm_set_c0_guest_status(cop0, ST0_EXL);
1952 if (cause & CAUSEF_BD)
1953 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1955 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1957 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1959 kvm_change_c0_guest_cause(cop0, (0xff),
1960 (EXCCODE_SYS << CAUSEB_EXCCODE));
1962 /* Set PC to the exception entry point */
1963 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
1966 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1973 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1975 struct kvm_run *run,
1976 struct kvm_vcpu *vcpu)
1978 struct mips_coproc *cop0 = vcpu->arch.cop0;
1979 struct kvm_vcpu_arch *arch = &vcpu->arch;
1980 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1981 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1983 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1985 kvm_write_c0_guest_epc(cop0, arch->pc);
1986 kvm_set_c0_guest_status(cop0, ST0_EXL);
1988 if (cause & CAUSEF_BD)
1989 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1991 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1993 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1996 /* set pc to the exception entry point */
1997 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2000 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
2003 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2006 kvm_change_c0_guest_cause(cop0, (0xff),
2007 (EXCCODE_TLBL << CAUSEB_EXCCODE));
2009 /* setup badvaddr, context and entryhi registers for the guest */
2010 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2011 /* XXXKYMA: is the context register used by linux??? */
2012 kvm_write_c0_guest_entryhi(cop0, entryhi);
2014 return EMULATE_DONE;
2017 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
2019 struct kvm_run *run,
2020 struct kvm_vcpu *vcpu)
2022 struct mips_coproc *cop0 = vcpu->arch.cop0;
2023 struct kvm_vcpu_arch *arch = &vcpu->arch;
2024 unsigned long entryhi =
2025 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2026 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2028 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2030 kvm_write_c0_guest_epc(cop0, arch->pc);
2031 kvm_set_c0_guest_status(cop0, ST0_EXL);
2033 if (cause & CAUSEF_BD)
2034 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2036 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2038 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
2041 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
2045 /* set pc to the exception entry point */
2046 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2048 kvm_change_c0_guest_cause(cop0, (0xff),
2049 (EXCCODE_TLBL << CAUSEB_EXCCODE));
2051 /* setup badvaddr, context and entryhi registers for the guest */
2052 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2053 /* XXXKYMA: is the context register used by linux??? */
2054 kvm_write_c0_guest_entryhi(cop0, entryhi);
2056 return EMULATE_DONE;
2059 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
2061 struct kvm_run *run,
2062 struct kvm_vcpu *vcpu)
2064 struct mips_coproc *cop0 = vcpu->arch.cop0;
2065 struct kvm_vcpu_arch *arch = &vcpu->arch;
2066 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2067 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2069 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2071 kvm_write_c0_guest_epc(cop0, arch->pc);
2072 kvm_set_c0_guest_status(cop0, ST0_EXL);
2074 if (cause & CAUSEF_BD)
2075 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2077 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2079 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2082 /* Set PC to the exception entry point */
2083 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2085 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2087 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2090 kvm_change_c0_guest_cause(cop0, (0xff),
2091 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2093 /* setup badvaddr, context and entryhi registers for the guest */
2094 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2095 /* XXXKYMA: is the context register used by linux??? */
2096 kvm_write_c0_guest_entryhi(cop0, entryhi);
2098 return EMULATE_DONE;
2101 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2103 struct kvm_run *run,
2104 struct kvm_vcpu *vcpu)
2106 struct mips_coproc *cop0 = vcpu->arch.cop0;
2107 struct kvm_vcpu_arch *arch = &vcpu->arch;
2108 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2109 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2111 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2113 kvm_write_c0_guest_epc(cop0, arch->pc);
2114 kvm_set_c0_guest_status(cop0, ST0_EXL);
2116 if (cause & CAUSEF_BD)
2117 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2119 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2121 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2124 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2128 /* Set PC to the exception entry point */
2129 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2131 kvm_change_c0_guest_cause(cop0, (0xff),
2132 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2134 /* setup badvaddr, context and entryhi registers for the guest */
2135 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2136 /* XXXKYMA: is the context register used by linux??? */
2137 kvm_write_c0_guest_entryhi(cop0, entryhi);
2139 return EMULATE_DONE;
2142 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2144 struct kvm_run *run,
2145 struct kvm_vcpu *vcpu)
2147 struct mips_coproc *cop0 = vcpu->arch.cop0;
2148 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2149 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2150 struct kvm_vcpu_arch *arch = &vcpu->arch;
2152 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2154 kvm_write_c0_guest_epc(cop0, arch->pc);
2155 kvm_set_c0_guest_status(cop0, ST0_EXL);
2157 if (cause & CAUSEF_BD)
2158 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2160 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2162 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2165 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2169 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2171 kvm_change_c0_guest_cause(cop0, (0xff),
2172 (EXCCODE_MOD << CAUSEB_EXCCODE));
2174 /* setup badvaddr, context and entryhi registers for the guest */
2175 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2176 /* XXXKYMA: is the context register used by linux??? */
2177 kvm_write_c0_guest_entryhi(cop0, entryhi);
2179 return EMULATE_DONE;
2182 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2184 struct kvm_run *run,
2185 struct kvm_vcpu *vcpu)
2187 struct mips_coproc *cop0 = vcpu->arch.cop0;
2188 struct kvm_vcpu_arch *arch = &vcpu->arch;
2190 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2192 kvm_write_c0_guest_epc(cop0, arch->pc);
2193 kvm_set_c0_guest_status(cop0, ST0_EXL);
2195 if (cause & CAUSEF_BD)
2196 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2198 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2202 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2204 kvm_change_c0_guest_cause(cop0, (0xff),
2205 (EXCCODE_CPU << CAUSEB_EXCCODE));
2206 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2208 return EMULATE_DONE;
2211 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2213 struct kvm_run *run,
2214 struct kvm_vcpu *vcpu)
2216 struct mips_coproc *cop0 = vcpu->arch.cop0;
2217 struct kvm_vcpu_arch *arch = &vcpu->arch;
2218 enum emulation_result er = EMULATE_DONE;
2220 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2222 kvm_write_c0_guest_epc(cop0, arch->pc);
2223 kvm_set_c0_guest_status(cop0, ST0_EXL);
2225 if (cause & CAUSEF_BD)
2226 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2228 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2230 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2232 kvm_change_c0_guest_cause(cop0, (0xff),
2233 (EXCCODE_RI << CAUSEB_EXCCODE));
2235 /* Set PC to the exception entry point */
2236 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2239 kvm_err("Trying to deliver RI when EXL is already set\n");
2246 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2248 struct kvm_run *run,
2249 struct kvm_vcpu *vcpu)
2251 struct mips_coproc *cop0 = vcpu->arch.cop0;
2252 struct kvm_vcpu_arch *arch = &vcpu->arch;
2253 enum emulation_result er = EMULATE_DONE;
2255 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2257 kvm_write_c0_guest_epc(cop0, arch->pc);
2258 kvm_set_c0_guest_status(cop0, ST0_EXL);
2260 if (cause & CAUSEF_BD)
2261 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2263 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2265 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2267 kvm_change_c0_guest_cause(cop0, (0xff),
2268 (EXCCODE_BP << CAUSEB_EXCCODE));
2270 /* Set PC to the exception entry point */
2271 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2274 kvm_err("Trying to deliver BP when EXL is already set\n");
2281 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2283 struct kvm_run *run,
2284 struct kvm_vcpu *vcpu)
2286 struct mips_coproc *cop0 = vcpu->arch.cop0;
2287 struct kvm_vcpu_arch *arch = &vcpu->arch;
2288 enum emulation_result er = EMULATE_DONE;
2290 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2292 kvm_write_c0_guest_epc(cop0, arch->pc);
2293 kvm_set_c0_guest_status(cop0, ST0_EXL);
2295 if (cause & CAUSEF_BD)
2296 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2298 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2300 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2302 kvm_change_c0_guest_cause(cop0, (0xff),
2303 (EXCCODE_TR << CAUSEB_EXCCODE));
2305 /* Set PC to the exception entry point */
2306 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2309 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2316 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2318 struct kvm_run *run,
2319 struct kvm_vcpu *vcpu)
2321 struct mips_coproc *cop0 = vcpu->arch.cop0;
2322 struct kvm_vcpu_arch *arch = &vcpu->arch;
2323 enum emulation_result er = EMULATE_DONE;
2325 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2327 kvm_write_c0_guest_epc(cop0, arch->pc);
2328 kvm_set_c0_guest_status(cop0, ST0_EXL);
2330 if (cause & CAUSEF_BD)
2331 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2333 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2335 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2337 kvm_change_c0_guest_cause(cop0, (0xff),
2338 (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2340 /* Set PC to the exception entry point */
2341 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2344 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");

enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
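
/*
 * The six delivery helpers above differ only in the exception code and the
 * debug string. A minimal sketch of how they could be funnelled through one
 * body is shown below; kvm_mips_deliver_guest_exc() is hypothetical and not
 * part of this file, it only illustrates the shared pattern.
 */
#if 0	/* illustrative sketch, not built */
static enum emulation_result kvm_mips_deliver_guest_exc(u32 exccode, u32 cause,
							struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_err("Trying to deliver EXC %d when EXL is already set\n",
			exccode);
		return EMULATE_FAIL;
	}

	/* Save the faulting PC and enter guest exception level */
	kvm_write_c0_guest_epc(cop0, arch->pc);
	kvm_set_c0_guest_status(cop0, ST0_EXL);

	/* Record whether the fault happened in a branch delay slot */
	if (cause & CAUSEF_BD)
		kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
	else
		kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (exccode << CAUSEB_EXCCODE));

	/* Set PC to the exception entry point */
	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
	return EMULATE_DONE;
}
#endif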

enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;
	int err;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op &&
	    inst.r_format.rs == 0 &&
	    (inst.r_format.re >> 3) == 0) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = vcpu->vcpu_id;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;
		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
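
/*
 * For reference, the RDHWR decode above accepts r_format words such as
 * "rdhwr $3, $29" (read UserLocal, hardware register 29, into GPR $3),
 * which assembles to 0x7c03e83b; the field breakdown below is a worked
 * example only:
 *
 *   opcode    rs      rt      rd      re      func
 *   011111    00000   00011   11101   00000   111011
 *   spec3_op  rs=0    rt=3    rd=29   sel=0   rdhwr_op
 */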

enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/* Restore saved resume PC */
	vcpu->arch.pc = vcpu->arch.io_pc;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

done:
	return er;
}
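
/*
 * Worked example of the width handling above, assuming a 64-bit kernel: a
 * 2-byte MMIO read that returns 0xff80 is written back to the GPR as
 * 0xffffffffffffff80 for a signed load (LH, vcpu->mmio_needed == 2) but as
 * 0x000000000000ff80 for an unsigned load (LHU). 4-byte loads are always
 * sign-extended, matching MIPS LW semantics.
 */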

static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
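
/*
 * Worked example of the CP0_Cause decode used above (the value is
 * illustrative): cause = 0x80000008 has CAUSEF_BD (bit 31) set and an
 * ExcCode field of (0x80000008 >> CAUSEB_EXCCODE) & 0x1f = 2 = EXCCODE_TLBL,
 * i.e. a TLB load miss taken in a branch delay slot.
 */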

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
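
/*
 * Illustrative scenario for the check above (the address range is an
 * assumption based on the trap-and-emulate guest segment layout, where
 * guest kernel segments start at KVM_GUEST_KSEG0): a guest-user load from
 * an address at or above KVM_GUEST_KSEG0 has its EXCCODE_TLBL rewritten to
 * EXCCODE_ADEL, so the guest sees an address error rather than a TLB miss,
 * just as real hardware refuses kernel-segment accesses from user mode.
 */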

/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB.
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu,
					      bool write_fault)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
								 write_fault)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,