/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
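/*
 * Note: VM_STAT()/VCPU_STAT() expand to a (field offset, stat type) pair;
 * the generic KVM debugfs code uses that pair to locate each counter in
 * struct kvm or struct kvm_vcpu and to decide whether it is per-VM or
 * per-vcpu when exposing the entries below.
 */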
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}
/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}
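/*
 * Usage note: callers that only flip EE/CE/ME/DE/RI may write shared->msr
 * directly, but any write that can change MSR[SPE], MSR[FP] or the debug
 * state must go through kvmppc_set_msr() so that the shadow MSR and MMU
 * state are resynchronized with the new guest-visible value.
 */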
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
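/*
 * The queue/dequeue helpers above only manipulate the pending_exceptions
 * priority bitmap (plus the queued DEAR/ESR payload); actual delivery is
 * deferred to kvmppc_booke_irqprio_deliver(), which checks the guest's
 * MSR mask bits before injecting anything.
 */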
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}
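/*
 * Note on the accessors above: with Book E HV the guest-visible
 * DEAR/ESR/EPR live in dedicated guest SPRs (GDEAR/GESR/GEPR) while the
 * vcpu is loaded, so the hardware registers are read and written
 * directly; without HV the same state is kept in the magic-page shared
 * area (or in vcpu->arch for EPR) instead.
 */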
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout happens when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
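/*
 * Worked example (illustrative numbers only): if TCR[WP] selects
 * period = 20, then wdt_tb = 2^(63 - 20) = 2^43 timebase ticks.  The
 * distance to the next bit boundary is wdt_tb - (tb % wdt_tb); if the
 * watched bit is currently 1 it must first fall back to 0, which costs
 * up to one extra wdt_tb interval, hence the conditional above.  The
 * resulting tick count is then rounded up to whole jiffies by do_div().
 */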
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the watchdog timeout is
	 * >= NEXT_TIMER_MAX_DELTA then do not run the watchdog timer,
	 * as this can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
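/*
 * This mirrors the Book E watchdog state machine: the first expiry sets
 * TSR[ENW], the second sets TSR[WIS], and only the third ("final") expiry
 * triggers the action selected by TCR[WRC], which here is an exit to
 * userspace when watchdog exits are enabled.  The cmpxchg loop makes the
 * TSR update safe against concurrent TSR writes from the guest or from
 * userspace ioctls.
 */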
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.shadow_dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.shadow_dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
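/*
 * The asm above captures just enough host context for the generic
 * handlers: "mr %0, 1" copies r1 (the stack pointer), mflr/mfmsr read LR
 * and MSR, and "bl 1f; 1: mflr %0" loads the address of the instruction
 * after the branch, which serves as an approximate NIP.
 */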
/*
 * Interrupts that need to be handled by the host are dispatched to the
 * corresponding host handlers from here, in much the same way (though not
 * exactly) as the low-level handlers call them
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}
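/*
 * Note that these handlers run on a pt_regs synthesized by
 * kvmppc_fill_pt_regs() rather than the exiting guest's register state:
 * the host handlers only need a plausible host context to do their work.
 */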
/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.time = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}
static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}
static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];

	return 0;
}
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = get_guest_epr(vcpu);
		val = get_reg_val(reg->id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		val = get_reg_val(reg->id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		val = get_reg_val(reg->id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		val = get_reg_val(reg->id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
		break;
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
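/*
 * Illustrative userspace usage of the ONE_REG interface served above
 * (sketch only; vcpu_fd is assumed to be an open KVM vcpu descriptor):
 *
 *	__u64 tcr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TCR,
 *		.addr = (__u64)(unsigned long)&tcr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */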
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
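/*
 * Note: with TCR[ARE] (auto-reload enable) set, the decrementer is
 * reloaded from DECAR on expiry before TSR[DIS] is raised, matching the
 * auto-reload behaviour of Book E decrementers.
 */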
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
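/*
 * Both helpers above also set DBCR0[IDM] (internal debug mode) so that
 * the programmed IAC/DAC events actually raise debug interrupts; the
 * caller accumulates breakpoints and watchpoints into the same shadow
 * debug_reg before it is installed on guest entry.
 */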
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
	/* Set DBCR0_EDM in guest visible DBCR0 register. */
	vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1,
	 * so DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		/* Reject any type bits we do not understand. */
		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. Only the
	 * top 16 bits of IVPR are implemented, so the handler block must be
	 * 64KB aligned; hence the 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}
void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}