xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintenance
author    Christoph Hellwig <hch@lst.de>  Mon, 2 Sep 2019 08:44:19 +0000 (10:44 +0200)
committer Christoph Hellwig <hch@lst.de>  Wed, 11 Sep 2019 10:43:16 +0000 (12:43 +0200)
Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.
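
The pattern, copied from arm64, is to decide per DMA handle whether the
page is local or foreign and pick the maintenance path accordingly. A
condensed sketch of the sync-for-cpu direction (not the literal patched
code; the comment restates the rationale this patch drops from
xen_dma_map_page):

    #include <linux/dma-direct.h>      /* dma_direct_sync_single_for_cpu() */
    #include <linux/pfn.h>             /* PFN_DOWN() */
    #include <xen/arm/page-coherent.h> /* __xen_dma_sync_single_for_cpu() */

    static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                    dma_addr_t handle, size_t size, enum dma_data_direction dir)
    {
            /*
             * Dom0 is mapped 1:1: if the handle's pfn is valid, the page
             * is local and the generic dma-direct maintenance applies;
             * otherwise it is a foreign page grant-mapped into dom0 and
             * needs the Xen-specific path.
             */
            if (pfn_valid(PFN_DOWN(handle)))
                    dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
            else
                    __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
    }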

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
arch/arm/include/asm/device.h
arch/arm/include/asm/xen/page-coherent.h
arch/arm/mm/dma-mapping.c
drivers/xen/swiotlb-xen.c

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index f6955b55c5449552759f6583e1d88484e9353e08..c675bc0d5aa886b130f96ba8488e7e6360540a4c 100644
@@ -14,9 +14,6 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
        struct dma_iommu_mapping        *mapping;
-#endif
-#ifdef CONFIG_XEN
-       const struct dma_map_ops *dev_dma_ops;
 #endif
        unsigned int dma_coherent:1;
        unsigned int dma_ops_setup:1;
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 2c403e7c782d31f83b956dcbcc9a9a9027092ffe..602ac02f154c9013a6ff13fefd14fa785d8bfe3a 100644
@@ -6,23 +6,37 @@
 #include <asm/page.h>
 #include <xen/arm/page-coherent.h>
 
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-       if (dev && dev->archdata.dev_dma_ops)
-               return dev->archdata.dev_dma_ops;
-       return get_arch_dma_ops(NULL);
-}
-
 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
                dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
 {
-       return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+       return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
 }
 
 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
 {
-       xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+       dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+
+       if (pfn_valid(pfn))
+               dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+       else
+               __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(handle);
+       if (pfn_valid(pfn))
+               dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+       else
+               __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -36,17 +50,8 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
        bool local = (page_pfn <= dev_pfn) &&
                (dev_pfn - page_pfn < compound_pages);
 
-       /*
-        * Dom0 is mapped 1:1, while the Linux page can span across
-        * multiple Xen pages, it's not possible for it to contain a
-        * mix of local and foreign Xen pages. So if the first xen_pfn
-        * == mfn the page is local otherwise it's a foreign page
-        * grant-mapped in dom0. If the page is local we can safely
-        * call the native dma_ops function, otherwise we call the xen
-        * specific function.
-        */
        if (local)
-               xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+               dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
        else
                __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
@@ -63,33 +68,10 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
         * safely call the native dma_ops function, otherwise we call the xen
         * specific function.
         */
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->unmap_page)
-                       xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-       } else
+       if (pfn_valid(pfn))
+               dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+       else
                __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
 }
 
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(handle);
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-                       xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-       } else
-               __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(handle);
-       if (pfn_valid(pfn)) {
-               if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-                       xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-       } else
-               __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
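
For context, the driver-facing flow that ends up in these helpers on a
Xen dom0 looks roughly like this (illustration only; example_rx, my_dev
and buf_page are made-up names, but the call chain follows the
swiotlb-xen ops):

    #include <linux/dma-mapping.h>

    static void example_rx(struct device *my_dev, struct page *buf_page)
    {
            dma_addr_t dma = dma_map_page(my_dev, buf_page, 0, PAGE_SIZE,
                                          DMA_FROM_DEVICE);

            if (dma_mapping_error(my_dev, dma))
                    return;
            /* ... device DMAs into the page ... */
            dma_sync_single_for_cpu(my_dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
            /*
             * On dom0 the device's dma_ops are the xen-swiotlb ops, whose
             * sync hook lands in xen_dma_sync_single_for_cpu() above:
             * local pages take the dma-direct path, foreign grant-mapped
             * pages the __xen_dma_* path.
             */
            dma_unmap_page(my_dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
    }
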
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d9889889314056d908aab9d3a83ea3027933e35b..143d7c79b4cb679e80d703dc2c1344330c61fcc0 100644
@@ -1105,10 +1105,6 @@ static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
         * 32-bit DMA.
         * Use the generic dma-direct / swiotlb ops code in that case, as that
         * handles bounce buffering for us.
-        *
-        * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
-        * latter is also selected by the Xen code, but that code for now relies
-        * on non-NULL dev_dma_ops.  To be cleaned up later.
         */
        if (IS_ENABLED(CONFIG_ARM_LPAE))
                return NULL;
@@ -2318,10 +2314,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        set_dma_ops(dev, dma_ops);
 
 #ifdef CONFIG_XEN
-       if (xen_initial_domain()) {
-               dev->archdata.dev_dma_ops = dev->dma_ops;
+       if (xen_initial_domain())
                dev->dma_ops = xen_dma_ops;
-       }
 #endif
        dev->archdata.dma_ops_setup = true;
 }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df496bf384bdb3d8f9c37769326f4c95f01cb..eee86cc7046b7983999c0b36f81d6206c8e3e199 100644
@@ -557,11 +557,6 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                     void *cpu_addr, dma_addr_t dma_addr, size_t size,
                     unsigned long attrs)
 {
-#ifdef CONFIG_ARM
-       if (xen_get_dma_ops(dev)->mmap)
-               return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
-                                                   dma_addr, size, attrs);
-#endif
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
@@ -574,21 +569,6 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size,
                        unsigned long attrs)
 {
-#ifdef CONFIG_ARM
-       if (xen_get_dma_ops(dev)->get_sgtable) {
-#if 0
-       /*
-        * This check verifies that the page belongs to the current domain and
-        * is not one mapped from another domain.
-        * This check is for debug only, and should not go to production build
-        */
-               unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
-               BUG_ON (!page_is_ram(bfn));
-#endif
-               return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
-                                                          handle, size, attrs);
-       }
-#endif
        return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
 }