asedeno.scripts.mit.edu Git - linux.git/blobdiff - virt/kvm/kvm_main.c
KVM: Move initialization of preempt notifier to kvm_vcpu_init()
[linux.git] / virt / kvm / kvm_main.c
index 00268290dcbd85a75ec2f17cc2caa5431c03208b..1ddb6d4cfbfd0bc663568015b63e2406c7e9818b 100644 (file)
@@ -104,8 +104,7 @@ static cpumask_var_t cpus_hardware_enabled;
 static int kvm_usage_count;
 static atomic_t hardware_enable_failed;
 
-struct kmem_cache *kvm_vcpu_cache;
-EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+static struct kmem_cache *kvm_vcpu_cache;
 
 static __read_mostly struct preempt_ops kvm_preempt_ops;
 
@@ -113,7 +112,7 @@ struct dentry *kvm_debugfs_dir;
 EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 
 static int kvm_debugfs_num_entries;
-static const struct file_operations *stat_fops_per_vm[];
+static const struct file_operations stat_fops_per_vm;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
@@ -322,7 +321,7 @@ void kvm_reload_remote_mmus(struct kvm *kvm)
        kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
 }
 
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+static int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
        struct page *page;
        int r;
@@ -349,6 +348,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        kvm_vcpu_set_dy_eligible(vcpu, false);
        vcpu->preempted = false;
        vcpu->ready = false;
+       preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
@@ -360,9 +360,8 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 fail:
        return r;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        /*
         * no need for rcu_read_lock as VCPU_RUN is the only place that
@@ -373,7 +372,15 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
+
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_destroy(vcpu);
+
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
 
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
@@ -650,11 +657,11 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
                        return -ENOMEM;
 
                stat_data->kvm = kvm;
-               stat_data->offset = p->offset;
-               stat_data->mode = p->mode ? p->mode : 0644;
+               stat_data->dbgfs_item = p;
                kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-               debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
-                                   stat_data, stat_fops_per_vm[p->kind]);
+               debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+                                   kvm->debugfs_dentry, stat_data,
+                                   &stat_fops_per_vm);
        }
        return 0;
 }
@@ -964,7 +971,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
 
        /*
         * Increment the new memslot generation a second time, dropping the
-        * update in-progress flag and incrementing then generation based on
+        * update in-progress flag and incrementing the generation based on
         * the number of address spaces.  This provides a unique and easily
         * identifiable generation number while the memslots are in flux.
         */
@@ -1117,7 +1124,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                 *
                 * validation of sp->gfn happens in:
                 *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
-                *      - kvm_is_visible_gfn (mmu_check_roots)
+                *      - kvm_is_visible_gfn (mmu_check_root)
                 */
                kvm_arch_flush_shadow_memslot(kvm, slot);
 
@@ -1519,7 +1526,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
  * true indicates success, otherwise false is returned.  It's also the
- * only part that runs if we can are in atomic context.
+ * only part that runs if we can in atomic context.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                            bool *writable, kvm_pfn_t *pfn)
@@ -1931,11 +1938,8 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-       if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
-               struct page *page = pfn_to_page(pfn);
-
-               SetPageDirty(page);
-       }
+       if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
+               SetPageDirty(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
@@ -2731,13 +2735,23 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        kvm->created_vcpus++;
        mutex_unlock(&kvm->lock);
 
-       vcpu = kvm_arch_vcpu_create(kvm, id);
-       if (IS_ERR(vcpu)) {
-               r = PTR_ERR(vcpu);
+       r = kvm_arch_vcpu_precreate(kvm, id);
+       if (r)
+               goto vcpu_decrement;
+
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu) {
+               r = -ENOMEM;
                goto vcpu_decrement;
        }
 
-       preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+       r = kvm_vcpu_init(vcpu, kvm, id);
+       if (r)
+               goto vcpu_free;
+
+       r = kvm_arch_vcpu_create(vcpu);
+       if (r)
+               goto vcpu_uninit;
 
        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
@@ -2780,6 +2794,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        debugfs_remove_recursive(vcpu->debugfs_dentry);
 vcpu_destroy:
        kvm_arch_vcpu_destroy(vcpu);
+vcpu_uninit:
+       kvm_vcpu_uninit(vcpu);
+vcpu_free:
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
 vcpu_decrement:
        mutex_lock(&kvm->lock);
        kvm->created_vcpus--;
@@ -4013,8 +4031,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
                return -ENOENT;
 
        if (simple_attr_open(inode, file, get,
-                            stat_data->mode & S_IWUGO ? set : NULL,
-                            fmt)) {
+                   KVM_DBGFS_GET_MODE(stat_data->dbgfs_item) & 0222
+                   ? set : NULL,
+                   fmt)) {
                kvm_put_kvm(stat_data->kvm);
                return -ENOMEM;
        }
@@ -4033,105 +4052,111 @@ static int kvm_debugfs_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static int vm_stat_get_per_vm(void *data, u64 *val)
+static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
 {
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+       *val = *(ulong *)((void *)kvm + offset);
+
+       return 0;
+}
 
-       *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset);
+static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
+{
+       *(ulong *)((void *)kvm + offset) = 0;
 
        return 0;
 }
 
-static int vm_stat_clear_per_vm(void *data, u64 val)
+static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
 {
-       struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+       int i;
+       struct kvm_vcpu *vcpu;
 
-       if (val)
-               return -EINVAL;
+       *val = 0;
 
-       *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               *val += *(u64 *)((void *)vcpu + offset);
 
        return 0;
 }
 
-static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
 {
-       __simple_attr_check_format("%llu\n", 0ull);
-       return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
-                               vm_stat_clear_per_vm, "%llu\n");
-}
+       int i;
+       struct kvm_vcpu *vcpu;
 
-static const struct file_operations vm_stat_get_per_vm_fops = {
-       .owner   = THIS_MODULE,
-       .open    = vm_stat_get_per_vm_open,
-       .release = kvm_debugfs_release,
-       .read    = simple_attr_read,
-       .write   = simple_attr_write,
-       .llseek  = no_llseek,
-};
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               *(u64 *)((void *)vcpu + offset) = 0;
+
+       return 0;
+}
 
-static int vcpu_stat_get_per_vm(void *data, u64 *val)
+static int kvm_stat_data_get(void *data, u64 *val)
 {
-       int i;
+       int r = -EFAULT;
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-       struct kvm_vcpu *vcpu;
 
-       *val = 0;
-
-       kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-               *val += *(u64 *)((void *)vcpu + stat_data->offset);
+       switch (stat_data->dbgfs_item->kind) {
+       case KVM_STAT_VM:
+               r = kvm_get_stat_per_vm(stat_data->kvm,
+                                       stat_data->dbgfs_item->offset, val);
+               break;
+       case KVM_STAT_VCPU:
+               r = kvm_get_stat_per_vcpu(stat_data->kvm,
+                                         stat_data->dbgfs_item->offset, val);
+               break;
+       }
 
-       return 0;
+       return r;
 }
 
-static int vcpu_stat_clear_per_vm(void *data, u64 val)
+static int kvm_stat_data_clear(void *data, u64 val)
 {
-       int i;
+       int r = -EFAULT;
        struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
-       struct kvm_vcpu *vcpu;
 
        if (val)
                return -EINVAL;
 
-       kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
-               *(u64 *)((void *)vcpu + stat_data->offset) = 0;
+       switch (stat_data->dbgfs_item->kind) {
+       case KVM_STAT_VM:
+               r = kvm_clear_stat_per_vm(stat_data->kvm,
+                                         stat_data->dbgfs_item->offset);
+               break;
+       case KVM_STAT_VCPU:
+               r = kvm_clear_stat_per_vcpu(stat_data->kvm,
+                                           stat_data->dbgfs_item->offset);
+               break;
+       }
 
-       return 0;
+       return r;
 }
 
-static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+static int kvm_stat_data_open(struct inode *inode, struct file *file)
 {
        __simple_attr_check_format("%llu\n", 0ull);
-       return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
-                                vcpu_stat_clear_per_vm, "%llu\n");
+       return kvm_debugfs_open(inode, file, kvm_stat_data_get,
+                               kvm_stat_data_clear, "%llu\n");
 }
 
-static const struct file_operations vcpu_stat_get_per_vm_fops = {
-       .owner   = THIS_MODULE,
-       .open    = vcpu_stat_get_per_vm_open,
+static const struct file_operations stat_fops_per_vm = {
+       .owner = THIS_MODULE,
+       .open = kvm_stat_data_open,
        .release = kvm_debugfs_release,
-       .read    = simple_attr_read,
-       .write   = simple_attr_write,
-       .llseek  = no_llseek,
-};
-
-static const struct file_operations *stat_fops_per_vm[] = {
-       [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
-       [KVM_STAT_VM]   = &vm_stat_get_per_vm_fops,
+       .read = simple_attr_read,
+       .write = simple_attr_write,
+       .llseek = no_llseek,
 };
 
 static int vm_stat_get(void *_offset, u64 *val)
 {
        unsigned offset = (long)_offset;
        struct kvm *kvm;
-       struct kvm_stat_data stat_tmp = {.offset = offset};
        u64 tmp_val;
 
        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               stat_tmp.kvm = kvm;
-               vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+               kvm_get_stat_per_vm(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
@@ -4142,15 +4167,13 @@ static int vm_stat_clear(void *_offset, u64 val)
 {
        unsigned offset = (long)_offset;
        struct kvm *kvm;
-       struct kvm_stat_data stat_tmp = {.offset = offset};
 
        if (val)
                return -EINVAL;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               stat_tmp.kvm = kvm;
-               vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+               kvm_clear_stat_per_vm(kvm, offset);
        }
        mutex_unlock(&kvm_lock);
 
@@ -4163,14 +4186,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 {
        unsigned offset = (long)_offset;
        struct kvm *kvm;
-       struct kvm_stat_data stat_tmp = {.offset = offset};
        u64 tmp_val;
 
        *val = 0;
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               stat_tmp.kvm = kvm;
-               vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+               kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
                *val += tmp_val;
        }
        mutex_unlock(&kvm_lock);
@@ -4181,15 +4202,13 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 {
        unsigned offset = (long)_offset;
        struct kvm *kvm;
-       struct kvm_stat_data stat_tmp = {.offset = offset};
 
        if (val)
                return -EINVAL;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               stat_tmp.kvm = kvm;
-               vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+               kvm_clear_stat_per_vcpu(kvm, offset);
        }
        mutex_unlock(&kvm_lock);
 
@@ -4262,9 +4281,8 @@ static void kvm_init_debug(void)
 
        kvm_debugfs_num_entries = 0;
        for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-               int mode = p->mode ? p->mode : 0644;
-               debugfs_create_file(p->name, mode, kvm_debugfs_dir,
-                                   (void *)(long)p->offset,
+               debugfs_create_file(p->name, KVM_DBGFS_GET_MODE(p),
+                                   kvm_debugfs_dir, (void *)(long)p->offset,
                                    stat_fops[p->kind]);
        }
 }