diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 40c4349cb93935a694be59ef37e321941b845e0f..132fef8ff1b65f1776459d5d864808c87de9d0eb 100644 (file)
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -35,6 +35,7 @@
 
 #include <linux/highmem.h>
 #include <linux/export.h>
+#include <xen/xen.h>
 #include <drm/drmP.h>
 #include "drm_legacy.h"
 
@@ -150,15 +151,34 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_legacy_ioremapfree);
 
-u64 drm_get_max_iomem(void)
+bool drm_need_swiotlb(int dma_bits)
 {
        struct resource *tmp;
        resource_size_t max_iomem = 0;
 
+       /*
+        * Xen paravirtual hosts require swiotlb regardless of requested dma
+        * transfer size.
+        *
+        * NOTE: Really, what it requires is use of the dma_alloc_coherent
+        *       allocator used in ttm_dma_populate() instead of
+        *       ttm_populate_and_map_pages(), which bounce buffers so much in
+        *       Xen it leads to swiotlb buffer exhaustion.
+        */
+       if (xen_pv_domain())
+               return true;
+
+       /*
+        * Enforce dma_alloc_coherent when memory encryption is active as well
+        * for the same reasons as for Xen paravirtual hosts.
+        */
+       if (mem_encrypt_active())
+               return true;
+
        for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
                max_iomem = max(max_iomem,  tmp->end);
        }
 
-       return max_iomem;
+       return max_iomem > ((u64)1 << dma_bits);
 }
-EXPORT_SYMBOL(drm_get_max_iomem);
+EXPORT_SYMBOL(drm_need_swiotlb);
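
With this change a driver no longer compares the maximum iomem address itself; it passes its DMA addressing width and gets back a yes/no answer on whether swiotlb handling is required. A minimal caller sketch follows, assuming a hypothetical driver-private structure and a 40-bit DMA mask; the structure and function names are illustrative only and are not part of this patch, although the real radeon/amdgpu call sites follow the same pattern.

    /*
     * Illustrative use of the renamed helper. The struct and function below
     * are hypothetical; in mainline the declaration of drm_need_swiotlb()
     * lives in <drm/drm_cache.h>.
     */
    #include <drm/drm_cache.h>

    struct my_drm_priv {
            bool need_swiotlb;      /* switch TTM to the coherent page pool */
    };

    static void my_driver_detect_swiotlb(struct my_drm_priv *priv)
    {
            /* 40 bits of DMA addressing assumed for this example */
            priv->need_swiotlb = drm_need_swiotlb(40);
    }

Drivers of that era typically consulted such a flag in their TTM populate path, choosing ttm_dma_populate() (dma_alloc_coherent-backed) over ttm_populate_and_map_pages() when the helper returns true.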