x86/dma/amd_gart: Use dma_direct_{alloc,free}()
author Christoph Hellwig <hch@lst.de>
Mon, 19 Mar 2018 10:38:18 +0000 (11:38 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 20 Mar 2018 09:01:57 +0000 (10:01 +0100)
This gains support for CMA allocations for the force_iommu case, and
cleans up the code a bit.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-7-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
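
On the CMA point in the message above: in this kernel generation, dma_direct_alloc() tries the contiguous memory allocator (CMA) before falling back to the page allocator, so routing the force_iommu path through it picks up CMA allocations for free. A simplified sketch of that allocation core, with its shape assumed from lib/dma-direct.c of the era (GFP zone selection and retry logic elided; not the verbatim source):

/*
 * Simplified sketch of the dma_direct_alloc() core; shape assumed
 * from lib/dma-direct.c of this era, not the verbatim source.
 */
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* Try the CMA area first -- this is what the GART path gains. */
	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);

	/* Fall back to the buddy allocator. */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}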
arch/x86/kernel/amd_gart_64.c

index 79ac6caaaabb36f0e6f932e8110d8dd25e6cf747..f299d8a479bbb359d45da12b2743b4d47733afb9 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -480,29 +480,21 @@ static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag, unsigned long attrs)
 {
-       dma_addr_t paddr;
-       unsigned long align_mask;
-       struct page *page;
-
-       if (force_iommu && dev->coherent_dma_mask > DMA_BIT_MASK(24)) {
-               flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-               page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-               if (!page)
-                       return NULL;
-
-               align_mask = (1UL << get_order(size)) - 1;
-               paddr = dma_map_area(dev, page_to_phys(page), size,
-                                    DMA_BIDIRECTIONAL, align_mask);
-
-               flush_gart();
-               if (paddr != bad_dma_addr) {
-                       *dma_addr = paddr;
-                       return page_address(page);
-               }
-               __free_pages(page, get_order(size));
-       } else
-               return dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+       void *vaddr;
+
+       vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+       if (!vaddr ||
+           !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+               return vaddr;
 
+       *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
+                       DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
+       flush_gart();
+       if (unlikely(*dma_addr == bad_dma_addr))
+               goto out_free;
+       return vaddr;
+out_free:
+       dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
        return NULL;
 }
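
The subject line says dma_direct_free() is adopted as well; that hunk is not shown here, but given the out_free error path above, the matching gart_free_coherent() would plausibly reduce to unmapping the GART entry and handing the buffer back to the direct allocator (a sketch inferred from this diff, not the verbatim commit):

/*
 * Plausible shape of the matching gart_free_coherent() change;
 * inferred from the subject line and the out_free path above.
 */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

The ordering matters: the GART aperture entry is torn down while the driver still owns the pages, and dma_direct_free() then returns CMA-backed buffers to the contiguous area (via dma_release_from_contiguous()) rather than straight to the page allocator.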