kvm: Convert kvm_lock to a mutex
author     Junaid Shahid <junaids@google.com>
           Fri, 4 Jan 2019 01:14:28 +0000 (17:14 -0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 5 Jun 2019 12:14:50 +0000 (14:14 +0200)
It doesn't seem as if there is any particular need for kvm_lock to be a
spinlock, so convert the lock to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Documentation/virtual/kvm/locking.txt
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c
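
For context, a minimal sketch (not part of this commit) of the pattern the conversion enables: with kvm_lock as a mutex, a walker of vm_list may sleep, e.g. via cond_resched(), while holding the lock, which would have been a bug under the old spinlock. The helper name walk_vms() below is hypothetical.

#include <linux/kvm_host.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Hypothetical helper: visit every VM while holding kvm_lock. */
static void walk_vms(void)
{
	struct kvm *kvm;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* ... per-VM work ... */

		/*
		 * Sleeping here is legal now; with the former
		 * spin_lock(&kvm_lock) this cond_resched() would
		 * have meant scheduling while atomic.
		 */
		cond_resched();
	}
	mutex_unlock(&kvm_lock);
}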

diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 1bb8bcaf8497703f7cdd61538ca1374f0e8ac622..635cd6eaf71495e081de44774e489d622323fcf4 100644
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows:
 
 On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
-
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
@@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above.
 ------------
 
 Name:          kvm_lock
-Type:          spinlock_t
+Type:          mutex
 Arch:          any
 Protects:      - vm_list
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7936af0a971f4ab93300be17e82127f5c7326841..0fef9192f6acdf5e5bb5d5ba043947fa4dc7495d 100644
@@ -2423,13 +2423,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
        if (!kvm->arch.sca)
                goto out_err;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        sca_offset += 16;
        if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
                sca_offset = 0;
        kvm->arch.sca = (struct bsca_block *)
                        ((char *) kvm->arch.sca + sca_offset);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        sprintf(debug_name, "kvm-%u", current->pid);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 95ac393e2959cb0fa93472e5cb20c84be7e51e57..3384c539d150e046723d8a439b742a78b6f2f59f 100644
@@ -5956,7 +5956,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        int nr_to_scan = sc->nr_to_scan;
        unsigned long freed = 0;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx;
@@ -5998,7 +5998,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                break;
        }
 
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return freed;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 10feed6a01eb8fd83ba6a4f89cbde5073d31d565..6200d5a51f1300ef36ec27cdaaa853a8ff171657 100644
@@ -6719,7 +6719,7 @@ static void kvm_hyperv_tsc_notifier(void)
        struct kvm_vcpu *vcpu;
        int cpu;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_make_mclock_inprogress_request(kvm);
 
@@ -6745,7 +6745,7 @@ static void kvm_hyperv_tsc_notifier(void)
 
                spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 }
 #endif
 
@@ -6796,17 +6796,17 @@ static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
 
        smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != cpu)
                                continue;
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-                       if (vcpu->cpu != smp_processor_id())
+                       if (vcpu->cpu != raw_smp_processor_id())
                                send_ipi = 1;
                }
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        if (freq->old < freq->new && send_ipi) {
                /*
@@ -6929,12 +6929,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
        struct kvm_vcpu *vcpu;
        int i;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
        atomic_set(&kvm_guest_has_master_clock, 0);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5e9fd7ad80183b35f5fcde534a96ee1a6b9f6c31..abafddb9fe2c4e796ec826ecd96f314d155981c4 100644
@@ -162,7 +162,7 @@ static inline bool is_error_page(struct page *page)
 
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b2579841263feaaf2235c967b5a30d731942b0b9..9613987ef4c8492ed6a920df6fc15946d830e623 100644
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *     kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -683,9 +683,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
        if (r)
                goto out_err;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        preempt_notifier_inc();
 
@@ -731,9 +731,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
        kvm_destroy_vm_debugfs(kvm);
        kvm_arch_sync_events(kvm);
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_del(&kvm->vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
                struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -4034,13 +4034,13 @@ static int vm_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -4053,12 +4053,12 @@ static int vm_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -4073,13 +4073,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -4092,12 +4092,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -4118,7 +4118,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        if (!kvm_dev.this_device || !kvm)
                return;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
@@ -4127,7 +4127,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
        if (!env)