asedeno.scripts.mit.edu Git - linux.git/commitdiff
KVM: arm/arm64: vgic-new: Add ACTIVE registers handlers
authorAndre Przywara <andre.przywara@arm.com>
Tue, 1 Dec 2015 12:40:58 +0000 (12:40 +0000)
committerChristoffer Dall <christoffer.dall@linaro.org>
Fri, 20 May 2016 13:39:52 +0000 (15:39 +0200)
The active register handlers are shared between the v2 and v3
emulation, so their implementation goes into vgic-mmio.c, to be
easily referenced from the v3 emulation as well later.
Since activation/deactivation of an interrupt may happen entirely
in the guest without it ever exiting, we need some extra logic to
properly track the active state.
For clearing the active state, we basically have to halt the guest to
make sure this is properly propagated into the respective VCPUs.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-mmio.h

index c13a7089bc9aff746c9c38b63054e7f89dfd730a..12e101b8fd527fd40e8688b4db47a46246d60d65 100644 (file)
@@ -84,10 +84,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
                vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
-               vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+               vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
-               vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+               vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
index d8dc8f6480dd10a5935e0eeafb2794705bc29916..79a4622dad04bfe8805d946137590bc5987ff247 100644 (file)
@@ -155,6 +155,87 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        }
 }
 
+/*
+ * Shared GICD_ISACTIVER/GICD_ICACTIVER read handler: reports one bit per
+ * interrupt, set iff the corresponding struct vgic_irq is marked active.
+ * 'addr' selects the first INTID covered (1 bit per IRQ), 'len' is the
+ * access width in bytes, so len * 8 interrupts are reported per access.
+ */
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       u32 value = 0;
+       int i;
+
+       /* Loop over all IRQs affected by this read */
+       for (i = 0; i < len * 8; i++) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               if (irq->active)
+                       value |= (1U << i);
+       }
+
+       return value;
+}
+
+/*
+ * Shared GICD_ICACTIVER write handler: clears the active state of every
+ * interrupt whose bit is set in 'val'.  Because the active state may still
+ * be cached in a list register while a VCPU runs, the whole guest is
+ * halted around the update so a concurrent LR sync cannot overwrite the
+ * state we clear here.
+ */
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+                            gpa_t addr, unsigned int len,
+                            unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+
+       /* Force all VCPUs out of the guest before touching active state. */
+       kvm_arm_halt_guest(vcpu->kvm);
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               spin_lock(&irq->irq_lock);
+               /*
+                * If this virtual IRQ was written into a list register, we
+                * have to make sure the CPU that runs the VCPU thread has
+                * synced back LR state to the struct vgic_irq.  We can only
+                * know this for sure, when either this irq is not assigned to
+                * anyone's AP list anymore, or the VCPU thread is not
+                * running on any CPUs.
+                *
+                * In the opposite case, we know the VCPU thread may be on its
+                * way back from the guest and still has to sync back this
+                * IRQ, so we release and re-acquire the spin_lock to let the
+                * other thread sync back the IRQ.
+                */
+               while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+                      irq->vcpu->cpu != -1) /* VCPU thread is running */
+                       cond_resched_lock(&irq->irq_lock);
+
+               irq->active = false;
+               spin_unlock(&irq->irq_lock);
+       }
+       kvm_arm_resume_guest(vcpu->kvm);
+}
+
+/*
+ * Shared GICD_ISACTIVER write handler: sets the active state of every
+ * interrupt whose bit is set in 'val'.  An IRQ that was not active before
+ * and has a target VCPU is queued so the new state takes effect; otherwise
+ * only the bookkeeping flag is updated.
+ */
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+                            gpa_t addr, unsigned int len,
+                            unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               spin_lock(&irq->irq_lock);
+
+               /*
+                * If the IRQ was already active or there is no target VCPU
+                * assigned at the moment, then just proceed.
+                */
+               if (irq->active || !irq->target_vcpu) {
+                       irq->active = true;
+
+                       spin_unlock(&irq->irq_lock);
+                       continue;
+               }
+
+               irq->active = true;
+               /* vgic_queue_irq_unlock() drops irq_lock for us. */
+               vgic_queue_irq_unlock(vcpu->kvm, irq);
+       }
+}
+
 static int match_region(const void *key, const void *elt)
 {
        const unsigned int offset = (unsigned long)key;
index 97ee703a1bd6165555dbbceeb18d078b9b887f5a..50b4464a073006817bc0ad3324a723789df4cee4 100644 (file)
@@ -118,6 +118,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+                            gpa_t addr, unsigned int len,
+                            unsigned long val);
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+                            gpa_t addr, unsigned int len,
+                            unsigned long val);
 
 unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);