vfio: If an IOMMU backend fails, keep looking
author Alex Williamson <alex.williamson@redhat.com>
Mon, 22 Feb 2016 23:02:30 +0000 (16:02 -0700)
committer Alex Williamson <alex.williamson@redhat.com>
Mon, 22 Feb 2016 23:10:08 +0000 (16:10 -0700)
Consider an IOMMU to be an API rather than an implementation; we might
have multiple implementations supporting the same API, so try another
if one fails.  The expectation here is that we'll really only have
one implementation per device type.  For instance, the existing type1
driver works with any PCI device where the IOMMU API is available.  A
vGPU vendor may have a virtual PCI device which provides DMA isolation
and mapping through other mechanisms, but can re-use userspace software
that makes use of the type1 VFIO IOMMU API.  This change allows that to
work.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
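
To illustrate the model the message describes, here is a minimal sketch of
how a second backend could register as another implementation of the type1
API.  Everything prefixed vgpu_ is hypothetical and invented for this
illustration; only vfio_register_iommu_driver(), struct
vfio_iommu_driver_ops, and the VFIO_CHECK_EXTENSION / VFIO_TYPE1_IOMMU
constants are real kernel interfaces of this era.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vfio.h>

struct vgpu_iommu {
	unsigned long flags;	/* stand-in for vendor-private state */
};

static void *vgpu_iommu_open(unsigned long arg)
{
	struct vgpu_iommu *v = kzalloc(sizeof(*v), GFP_KERNEL);

	if (!v)
		return ERR_PTR(-ENOMEM);
	return v;
}

static void vgpu_iommu_release(void *iommu_data)
{
	kfree(iommu_data);
}

static long vgpu_iommu_ioctl(void *iommu_data, unsigned int cmd,
			     unsigned long arg)
{
	/* Claim the same userspace API as the real type1 driver. */
	if (cmd == VFIO_CHECK_EXTENSION)
		return arg == VFIO_TYPE1_IOMMU;
	/* DMA_MAP/DMA_UNMAP would be implemented via vendor mechanisms. */
	return -ENOTTY;
}

static int vgpu_iommu_attach_group(void *iommu_data,
				   struct iommu_group *group)
{
	return 0;	/* accept only groups this backend can isolate */
}

static void vgpu_iommu_detach_group(void *iommu_data,
				    struct iommu_group *group)
{
}

static const struct vfio_iommu_driver_ops vgpu_iommu_ops = {
	.name		= "vfio-iommu-vgpu",
	.owner		= THIS_MODULE,
	.open		= vgpu_iommu_open,
	.release	= vgpu_iommu_release,
	.ioctl		= vgpu_iommu_ioctl,
	.attach_group	= vgpu_iommu_attach_group,
	.detach_group	= vgpu_iommu_detach_group,
};

static int __init vgpu_iommu_init(void)
{
	/* Registration makes this backend a candidate in the loop below. */
	return vfio_register_iommu_driver(&vgpu_iommu_ops);
}
module_init(vgpu_iommu_init);
MODULE_LICENSE("GPL");

Once registered, such a backend simply appears in the driver list; the
selection logic changed by this patch decides whether it actually gets
used for a given container.
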
drivers/vfio/vfio.c

index ecca316386f57fa64d04746d4b8326fa4c241ebd..4cc961b894affece4b77bbfc00b3fab2f5d677b9 100644
@@ -1080,30 +1080,26 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
                        continue;
                }
 
-               /* module reference holds the driver we're working on */
-               mutex_unlock(&vfio.iommu_drivers_lock);
-
                data = driver->ops->open(arg);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        module_put(driver->ops->owner);
-                       goto skip_drivers_unlock;
+                       continue;
                }
 
                ret = __vfio_container_attach_groups(container, driver, data);
-               if (!ret) {
-                       container->iommu_driver = driver;
-                       container->iommu_data = data;
-               } else {
+               if (ret) {
                        driver->ops->release(data);
                        module_put(driver->ops->owner);
+                       continue;
                }
 
-               goto skip_drivers_unlock;
+               container->iommu_driver = driver;
+               container->iommu_data = data;
+               break;
        }
 
        mutex_unlock(&vfio.iommu_drivers_lock);
-skip_drivers_unlock:
        up_write(&container->group_lock);
 
        return ret;
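
Putting the pieces together, the selection loop in vfio_ioctl_set_iommu()
after this patch reads roughly as follows.  This is a simplified sketch:
the container validity checks ahead of the loop and the initial value of
ret are elided, and the comments are editorial, so consult vfio.c for the
authoritative version.

	struct vfio_iommu_driver *driver;

	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/* Skip backends that don't claim support for this API. */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;	/* keep looking rather than bailing out */
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;	/* keep looking rather than bailing out */
		}

		/* First backend to open and attach successfully wins. */
		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

Note that the early mutex_unlock() and the skip_drivers_unlock label are
gone: vfio.iommu_drivers_lock is now held across the whole walk, so the
loop can simply continue to the next registered backend when open() or
group attachment fails, and both unlocks fall through to a single exit
path.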