/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"
static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;
struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	bool				noiommu;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};
#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
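/*
 * Illustrative only (not part of the original source): with
 * CONFIG_VFIO_NOIOMMU built in, the unsafe mode is enabled through the
 * module parameter declared above.  The paths below follow from the
 * module_param_named() naming and standard module parameter handling:
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *	echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
 */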
/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions, any use cases other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have an iommu_ops on their
	 * bus.  We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif
/*
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
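/*
 * Illustrative sketch (not from the original source): an IOMMU backend
 * such as vfio_iommu_type1 registers itself from its module init and
 * unregisters on exit.  Names below are placeholders:
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu",
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.attach_group	= my_attach_group,
 *		.detach_group	= my_detach_group,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */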
void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
/*
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
/*
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}
/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}
/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}
static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};
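/*
 * Deferred vfio_group_put():  dropping what might be the last group
 * reference takes vfio.group_lock and unregisters the iommu group
 * notifier, which cannot be done safely from the notifier callback
 * itself (see vfio_iommu_group_notifier()), so the put is punted to a
 * work item.
 */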
static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}
static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}
/*
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}
static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);
static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);

	return NULL;
}
/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	int i;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}
/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}
/*
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}
static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	/*
	 * If we're the last reference to the group, the group will be
	 * released, which includes unregistering the iommu group notifier.
	 * We hold a read-lock on that notifier list, unregistering needs
	 * a write-lock... deadlock.  Release our reference asynchronously
	 * to avoid that situation.
	 */
	vfio_group_schedule_put(group);
	return NOTIFY_OK;
}
/*
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
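/*
 * Illustrative sketch (not from the original source): a VFIO bus driver
 * such as vfio-pci pairs vfio_iommu_group_get()/vfio_add_group_dev() in
 * probe with vfio_del_group_dev()/vfio_iommu_group_put() in remove.
 * Names below are placeholders:
 *
 *	static int my_probe(struct pci_dev *pdev)
 *	{
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = vfio_iommu_group_get(&pdev->dev);
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(&pdev->dev, &my_vfio_ops, pdev);
 *		if (ret)
 *			vfio_iommu_group_put(group, &pdev->dev);
 *		return ret;
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		void *device_data = vfio_del_group_dev(&pdev->dev);
 *
 *		vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
 *	}
 */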
/*
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct vfio_group *group;
	struct vfio_device *device;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(dev_name(it->dev), buf)) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);
/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	long ret;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			ret = wait_event_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
		} else {
			ret = wait_event_interruptible_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
			if (ret == -ERESTARTSYS) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	} while (ret <= 0);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
/*
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}
/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}
static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and noiommu
		 * containers can only use vfio-noiommu.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}
static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}
/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};
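/*
 * Illustrative userspace flow (not from the original source), following
 * the container/group/device model implemented here.  "26" stands in for
 * whatever IOMMU group number the device landed in, and error handling
 * is elided:
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */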
/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}
/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}
static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}
static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static int vfio_group_add_container_user(struct vfio_group *group)
{
	if (!atomic_inc_not_zero(&group->container_users))
		return -EINVAL;

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return -EPERM;
	}
	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return -EINVAL;
	}

	return 0;
}

static const struct file_operations vfio_device_fops;
static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}
static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Warn if previous user didn't cleanup and re-init to drop them */
	if (WARN_ON(group->notifier.head))
		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};
/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};
/*
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
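/*
 * Illustrative sketch (not from the original source) of the external
 * user side, along the lines of virt/kvm/vfio.c.  "group_file" is a
 * struct file for an open /dev/vfio/$GROUP fd handed over by userspace:
 *
 *	struct vfio_group *grp = vfio_group_get_external_user(group_file);
 *
 *	if (!IS_ERR(grp)) {
 *		int id = vfio_external_user_iommu_id(grp);
 *		... use the IOMMU group id ...
 *		vfio_group_put_external_user(grp);
 *	}
 */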
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;
	int ret;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	ret = vfio_group_add_container_user(group);
	if (ret)
		return ERR_PTR(ret);

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

bool vfio_external_group_match_file(struct vfio_group *test_group,
				    struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	return (filep->f_op == &vfio_group_fops) && (group == test_group);
}
EXPORT_SYMBOL_GPL(vfio_external_group_match_file);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);
void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);
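/*
 * Illustrative sketch (not from the original source): a typical
 * *_GET_INFO ioctl handler builds the capability chain, shifts the
 * chain offsets by the size of the fixed info struct, and copies both
 * out to the user buffer.  Error handling is elided and "info"/"arg"
 * are assumed from the surrounding handler:
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *
 *	vfio_info_add_capability(&caps, VFIO_REGION_INFO_CAP_SPARSE_MMAP,
 *				 sparse);
 *	info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
 *	info.cap_offset = sizeof(info);
 *	vfio_info_cap_shift(&caps, sizeof(info));
 *	copy_to_user((void __user *)arg + sizeof(info), caps.buf, caps.size);
 *	kfree(caps.buf);
 */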
static int sparse_mmap_cap(struct vfio_info_cap *caps, void *cap_type)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_sparse_mmap *sparse_cap, *sparse = cap_type;
	size_t size;

	size = sizeof(*sparse) + sparse->nr_areas * sizeof(*sparse->areas);
	header = vfio_info_cap_add(caps, size,
				   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	sparse_cap = container_of(header,
			struct vfio_region_info_cap_sparse_mmap, header);
	sparse_cap->nr_areas = sparse->nr_areas;
	memcpy(sparse_cap->areas, sparse->areas,
	       sparse->nr_areas * sizeof(*sparse->areas));

	return 0;
}

static int region_type_cap(struct vfio_info_cap *caps, void *cap_type)
{
	struct vfio_info_cap_header *header;
	struct vfio_region_info_cap_type *type_cap, *cap = cap_type;

	header = vfio_info_cap_add(caps, sizeof(*cap),
				   VFIO_REGION_INFO_CAP_TYPE, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	type_cap = container_of(header, struct vfio_region_info_cap_type,
				header);
	type_cap->type = cap->type;
	type_cap->subtype = cap->subtype;

	return 0;
}
int vfio_info_add_capability(struct vfio_info_cap *caps, int cap_type_id,
			     void *cap_type)
{
	int ret = -EINVAL;

	if (!cap_type)
		return 0;

	switch (cap_type_id) {
	case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
		ret = sparse_mmap_cap(caps, cap_type);
		break;

	case VFIO_REGION_INFO_CAP_TYPE:
		ret = region_type_cap(caps, cap_type);
		break;
	}

	return ret;
}
EXPORT_SYMBOL(vfio_info_add_capability);
int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
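/*
 * Illustrative sketch (not from the original source): a vendor driver's
 * VFIO_DEVICE_SET_IRQS ioctl handler validates the header before copying
 * the variable-sized payload.  MY_NUM_IRQS is a placeholder:
 *
 *	struct vfio_irq_set hdr;
 *	size_t data_size = 0;
 *	u8 *data = NULL;
 *	int ret;
 *
 *	if (copy_from_user(&hdr, (void __user *)arg, minsz))
 *		return -EFAULT;
 *
 *	ret = vfio_set_irqs_validate_and_prepare(&hdr, MY_NUM_IRQS,
 *						 VFIO_PCI_NUM_IRQS, &data_size);
 *	if (!ret && data_size)
 *		data = memdup_user((void __user *)(arg + minsz), data_size);
 */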
/*
 * Pin a set of guest PFNs and return their associated host PFNs for local
 * domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be pinned.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @phys_pfn[out]: array of host PFNs
 * Return error or number of pages pinned.
 */
int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
		   int prot, unsigned long *phys_pfn)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !phys_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_pin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->pin_pages))
		ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
					     npage, prot, phys_pfn);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_pin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_pin_pages);
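/*
 * Illustrative sketch (not from the original source): a mediated device
 * vendor driver pins guest pages around DMA and unpins them when done.
 * Names are placeholders:
 *
 *	unsigned long gfns[1] = { gfn };
 *	unsigned long pfns[1];
 *	int ret;
 *
 *	ret = vfio_pin_pages(mdev_dev(mdev), gfns, 1,
 *			     IOMMU_READ | IOMMU_WRITE, pfns);
 *	if (ret == 1) {
 *		... DMA to/from the page backing pfns[0] ...
 *		vfio_unpin_pages(mdev_dev(mdev), gfns, 1);
 *	}
 */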
/*
 * Unpin set of host PFNs for local domain only.
 * @dev [in]     : device
 * @user_pfn [in]: array of user/guest PFNs to be unpinned.  Number of
 *		   user/guest PFNs should not be greater than
 *		   VFIO_PIN_PAGES_MAX_ENTRIES.
 * @npage [in]   : count of elements in user_pfn array.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * Return error or number of pages unpinned.
 */
int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
{
	struct vfio_container *container;
	struct vfio_group *group;
	struct vfio_iommu_driver *driver;
	int ret;

	if (!dev || !user_pfn || !npage)
		return -EINVAL;

	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
		return -E2BIG;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	ret = vfio_group_add_container_user(group);
	if (ret)
		goto err_unpin_pages;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unpin_pages))
		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
					       npage);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

err_unpin_pages:
	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unpin_pages);
static int vfio_register_iommu_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->register_notifier))
		ret = driver->ops->register_notifier(container->iommu_data,
						     events, nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_iommu_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	container = group->container;
	driver = container->iommu_driver;
	if (likely(driver && driver->ops->unregister_notifier))
		ret = driver->ops->unregister_notifier(container->iommu_data,
						       nb);
	else
		ret = -ENOTTY;

	vfio_group_try_dissolve_container(group);

	return ret;
}
void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
{
	group->kvm = kvm;
	blocking_notifier_call_chain(&group->notifier,
				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
}
EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
static int vfio_register_group_notifier(struct vfio_group *group,
					unsigned long *events,
					struct notifier_block *nb)
{
	int ret;
	bool set_kvm = false;

	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
		set_kvm = true;

	/* clear known events */
	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;

	/* refuse to continue if still events remaining */
	if (*events)
		return -EINVAL;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_register(&group->notifier, nb);

	/*
	 * The attaching of kvm and vfio_group might already happen, so
	 * here we replay once upon registration.
	 */
	if (!ret && set_kvm && group->kvm)
		blocking_notifier_call_chain(&group->notifier,
					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);

	vfio_group_try_dissolve_container(group);

	return ret;
}

static int vfio_unregister_group_notifier(struct vfio_group *group,
					  struct notifier_block *nb)
{
	int ret;

	ret = vfio_group_add_container_user(group);
	if (ret)
		return -EINVAL;

	ret = blocking_notifier_chain_unregister(&group->notifier, nb);

	vfio_group_try_dissolve_container(group);

	return ret;
}
int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
			   unsigned long *events, struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb || !events || (*events == 0))
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_register_iommu_notifier(group, events, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_register_group_notifier(group, events, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_register_notifier);
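/*
 * Illustrative sketch (not from the original source): registering for
 * DMA unmap events from the backing IOMMU driver, as an mdev vendor
 * driver would.  Names are placeholders:
 *
 *	static int my_iommu_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
 *			struct vfio_iommu_type1_dma_unmap *unmap = data;
 *			... invalidate pinnings in [iova, iova + size) ...
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
 *
 *	nb.notifier_call = my_iommu_notify;
 *	ret = vfio_register_notifier(dev, VFIO_IOMMU_NOTIFY, &events, &nb);
 */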
int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
			     struct notifier_block *nb)
{
	struct vfio_group *group;
	int ret;

	if (!dev || !nb)
		return -EINVAL;

	group = vfio_group_get_from_dev(dev);
	if (!group)
		return -ENODEV;

	switch (type) {
	case VFIO_IOMMU_NOTIFY:
		ret = vfio_unregister_iommu_notifier(group, nb);
		break;
	case VFIO_GROUP_NOTIFY:
		ret = vfio_unregister_group_notifier(group, nb);
		break;
	default:
		ret = -EINVAL;
	}

	vfio_group_put(group);
	return ret;
}
EXPORT_SYMBOL(vfio_unregister_notifier);
/*
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};
static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}
static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}
module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");