KVM: arm/arm64: vgic: Replace spin_is_locked() with lockdep
author		Lance Roy <ldr709@gmail.com>
		Fri, 5 Oct 2018 06:45:50 +0000 (23:45 -0700)
committer	Paul E. McKenney <paulmck@linux.ibm.com>
		Mon, 12 Nov 2018 17:06:22 +0000 (09:06 -0800)
lockdep_assert_held() is better suited to checking locking requirements,
since it checks that the current thread holds the lock, whereas
spin_is_locked() only reports whether some thread (possibly another one)
holds it. This is also a step towards possibly removing
spin_is_locked().
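
A minimal sketch of the difference, using hypothetical names (struct
my_irq and check_irq_locked() are illustrative only, not taken from the
vgic code):

        #include <linux/bug.h>
        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        struct my_irq {
                spinlock_t irq_lock;
        };

        static void check_irq_locked(struct my_irq *irq)
        {
                /*
                 * Old style: passes whenever *any* context holds the
                 * lock, and on UP builds without CONFIG_DEBUG_SPINLOCK
                 * spin_is_locked() always returns 0, so the assertion
                 * is unreliable.
                 */
                BUG_ON(!spin_is_locked(&irq->irq_lock));

                /*
                 * New style: warns unless the *current* thread holds
                 * the lock, and compiles to a no-op when
                 * CONFIG_LOCKDEP is off.
                 */
                lockdep_assert_held(&irq->irq_lock);
        }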

Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: <kvmarm@lists.cs.columbia.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
virt/kvm/arm/vgic/vgic.c

diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc910e0c48e38b9f6eaaefacbbe71d043fc..50e25438fb3c6ff190973b39eda88ee9ac71788f 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
  */
 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 {
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+       lockdep_assert_held(&irq->irq_lock);
 
        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+       lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 }
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 {
        struct kvm_vcpu *vcpu;
 
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+       lockdep_assert_held(&irq->irq_lock);
 
 retry:
        vcpu = vgic_target_oracle(irq);
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
 {
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+       lockdep_assert_held(&irq->irq_lock);
 
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 
        *multi_sgi = false;
 
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+       lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
        bool multi_sgi;
        u8 prio = 0xff;
 
-       DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+       lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
        count = compute_ap_list_depth(vcpu, &multi_sgi);
        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)