asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: arm/arm64: Move cntvoff to each timer context
author: Jintack Lim <jintack@cs.columbia.edu>
Fri, 3 Feb 2017 15:20:00 +0000 (10:20 -0500)
committer: Marc Zyngier <marc.zyngier@arm.com>
Wed, 8 Feb 2017 15:13:33 +0000 (15:13 +0000)
Make cntvoff per each timer context. This is helpful to abstract kvm
timer functions to work with timer context without considering timer
types (e.g. physical timer or virtual timer).

This also would pave the way for ever doing adjustments of the cntvoff
on a per-CPU basis if that should ever make sense.

Signed-off-by: Jintack Lim <jintack@cs.columbia.edu>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
arch/arm/include/asm/kvm_host.h
arch/arm/kvm/arm.c
arch/arm64/include/asm/kvm_host.h
include/kvm/arm_arch_timer.h
virt/kvm/arm/arch_timer.c
virt/kvm/arm/hyp/timer-sr.c

index d5423ab15ed5be1c705817e13d4a7d7fe35b465b..cc495d799c67643c58e136249197a06736299339 100644 (file)
@@ -60,9 +60,6 @@ struct kvm_arch {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
-       /* Timer */
-       struct arch_timer_kvm   timer;
-
        /*
         * Anything that is not used directly from assembly code goes
         * here.
index 9d7446456e0c4217e0931f3640eb006cb0fb83d7..f93f2171a48ba5cb17f60469d0efee30f53d64fb 100644 (file)
@@ -135,7 +135,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
                goto out_free_stage2_pgd;
 
        kvm_vgic_early_init(kvm);
-       kvm_timer_init(kvm);
 
        /* Mark the initial VMID generation invalid */
        kvm->arch.vmid_gen = 0;
index e5050388e062209868bac64cab1740ece15b3e13..4a758cba12629eb084fe140dc154da6a3599d71b 100644 (file)
@@ -70,9 +70,6 @@ struct kvm_arch {
 
        /* Interrupt controller */
        struct vgic_dist        vgic;
-
-       /* Timer */
-       struct arch_timer_kvm   timer;
 };
 
 #define KVM_NR_MEM_OBJS     40
index daad3c133b9fa60058f9270ce86a18637cb3752c..2c8560b4642aa04ace59ccd79ad185afe659c3a3 100644 (file)
 #include <linux/hrtimer.h>
 #include <linux/workqueue.h>
 
-struct arch_timer_kvm {
-       /* Virtual offset */
-       u64                     cntvoff;
-};
-
 struct arch_timer_context {
        /* Registers: control register, timer value */
        u32                             cnt_ctl;
@@ -38,6 +33,9 @@ struct arch_timer_context {
 
        /* Active IRQ state caching */
        bool                            active_cleared_last;
+
+       /* Virtual offset */
+       u64                     cntvoff;
 };
 
 struct arch_timer_cpu {
@@ -58,7 +56,6 @@ struct arch_timer_cpu {
 
 int kvm_timer_hyp_init(void);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
-void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
index d3556b3ca694c6daa6911baef1c4bdd848f161b5..5004a679b12595aa97e22479e786e73e21c5153a 100644 (file)
@@ -101,9 +101,10 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
 static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
 {
        u64 cval, now;
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-       cval = vcpu_vtimer(vcpu)->cnt_cval;
-       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+       cval = vtimer->cnt_cval;
+       now = kvm_phys_timer_read() - vtimer->cntvoff;
 
        if (now < cval) {
                u64 ns;
@@ -159,7 +160,7 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
                return false;
 
        cval = vtimer->cnt_cval;
-       now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+       now = kvm_phys_timer_read() - vtimer->cntvoff;
 
        return cval <= now;
 }
@@ -354,10 +355,32 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+/* Make the updates of cntvoff for all vtimer contexts atomic */
+static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
+{
+       int i;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *tmp;
+
+       mutex_lock(&kvm->lock);
+       kvm_for_each_vcpu(i, tmp, kvm)
+               vcpu_vtimer(tmp)->cntvoff = cntvoff;
+
+       /*
+        * When called from the vcpu create path, the CPU being created is not
+        * included in the loop above, so we just set it here as well.
+        */
+       vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+       mutex_unlock(&kvm->lock);
+}
+
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
+       /* Synchronize cntvoff across all vtimers of a VM. */
+       update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+
        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
@@ -377,7 +400,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
                vtimer->cnt_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
-               vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
+               update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                vtimer->cnt_cval = value;
@@ -398,7 +421,7 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
        case KVM_REG_ARM_TIMER_CTL:
                return vtimer->cnt_ctl;
        case KVM_REG_ARM_TIMER_CNT:
-               return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+               return kvm_phys_timer_read() - vtimer->cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return vtimer->cnt_cval;
        }
@@ -510,11 +533,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-void kvm_timer_init(struct kvm *kvm)
-{
-       kvm->arch.timer.cntvoff = kvm_phys_timer_read();
-}
-
 /*
  * On VHE system, we only need to configure trap on physical timer and counter
  * accesses in EL0 and EL1 once, not for every world switch.
index 0cf08953e81cffcc9afeeca070189d3ec763a993..4734915ab71f78e71c598ec5bbb7b7b2eb73a084 100644 (file)
@@ -53,7 +53,6 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 
 void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 {
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        u64 val;
@@ -71,7 +70,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
        }
 
        if (timer->enabled) {
-               write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+               write_sysreg(vtimer->cntvoff, cntvoff_el2);
                write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
                isb();
                write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);