dma-direct: provide generic support for uncached kernel segments
author	Christoph Hellwig <hch@lst.de>
	Mon, 3 Jun 2019 06:43:51 +0000 (08:43 +0200)
committer	Christoph Hellwig <hch@lst.de>
	Mon, 3 Jun 2019 14:00:08 +0000 (16:00 +0200)
A few architectures support uncached kernel segments.  In that case we get
an uncached mapping for a given physical address by using an offset into the
uncached segment.  Implement support for this scheme in the generic
dma-direct code instead of duplicating it in arch hooks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
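
For context, the two helpers the generic code now calls are expected to
translate between the cached and uncached aliases of the same physical
memory.  Below is a minimal sketch of what an architecture might provide,
assuming a MIPS-style layout where a fixed-offset uncached window (KSEG1)
aliases the cached linear map (KSEG0); UNCAC_BASE stands for the base of
that window and the bodies are illustrative, not part of this patch:

	/* Hypothetical arch implementation: translate a cached
	 * linear-map address to its uncached alias and back, assuming
	 * both windows map physical memory at fixed virtual offsets. */
	void *uncached_kernel_address(void *addr)
	{
		return (void *)(__pa(addr) + UNCAC_BASE);
	}

	void *cached_kernel_address(void *addr)
	{
		return __va((unsigned long)addr - UNCAC_BASE);
	}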
arch/Kconfig
include/linux/dma-noncoherent.h
kernel/dma/direct.c

diff --git a/arch/Kconfig b/arch/Kconfig
index c47b328eada0..e8d19c3cb91f 100644
@@ -260,6 +260,14 @@ config ARCH_HAS_SET_MEMORY
 config ARCH_HAS_SET_DIRECT_MAP
        bool
 
+#
+# Select if arch has an uncached kernel segment and provides the
+# uncached_kernel_address / cached_kernel_address symbols to use it
+#
+config ARCH_HAS_UNCACHED_SEGMENT
+       select ARCH_HAS_DMA_PREP_COHERENT
+       bool
+
 # Select if arch init_task must go in the __init_task_data section
 config ARCH_TASK_STRUCT_ON_STACK
        bool
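
An architecture opts in by selecting the new symbol from its own Kconfig
entry; ARCH_HAS_DMA_PREP_COHERENT is then pulled in automatically by the
select above.  A hypothetical arch entry, for illustration only:

	config MYARCH
		# hypothetical architecture, illustrative only
		select ARCH_HAS_UNCACHED_SEGMENT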
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 9741767e400f..7e0126a04e02 100644
@@ -80,4 +80,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
+void *uncached_kernel_address(void *addr);
+void *cached_kernel_address(void *addr);
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0816c1e8b05a..b67f0aa08aa3 100644
@@ -158,6 +158,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
+
+       if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+               arch_dma_prep_coherent(page, size);
+               ret = uncached_kernel_address(ret);
+       }
+
        return ret;
 }
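
The arch_dma_prep_coherent() call matters for ordering: any dirty cache
lines covering the buffer must be written back and invalidated before the
uncached alias is handed out, or a later write-back could clobber data the
device has placed in memory.  A sketch of such a hook, assuming a
MIPS-style writeback-and-invalidate primitive (dma_cache_wback_inv); real
architectures use their own cache ops:

	/* Hypothetical arch hook: flush the buffer out of the cache so
	 * the cached and uncached aliases cannot disagree. */
	void arch_dma_prep_coherent(struct page *page, size_t size)
	{
		dma_cache_wback_inv((unsigned long)page_address(page), size);
	}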
 
@@ -173,13 +180,18 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 
        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+       if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
+               cpu_addr = cached_kernel_address(cpu_addr);
        __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
 }
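
The conversion back through cached_kernel_address() is needed because
virt_to_page() only understands linear-map (cached) virtual addresses;
handing it the uncached alias would resolve to the wrong struct page.  The
generic code therefore relies on the two helpers being exact inverses,
roughly this invariant for any lowmem address p (illustrative check, not
real kernel code):

	BUG_ON(cached_kernel_address(uncached_kernel_address(p)) != p);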
 
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-       if (!dev_is_dma_coherent(dev))
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
@@ -187,7 +199,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!dev_is_dma_coherent(dev))
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !dev_is_dma_coherent(dev))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
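
From the driver side nothing changes: on a non-coherent device,
dma_alloc_attrs() now transparently returns a pointer into the uncached
segment, and the matching free accepts that same pointer.  A hedged usage
sketch, where dev and len are placeholders supplied by the caller:

	/* Illustrative driver usage; dev is a non-coherent device. */
	dma_addr_t dma;
	void *buf = dma_alloc_attrs(dev, len, &dma, GFP_KERNEL, 0);
	if (!buf)
		return -ENOMEM;
	/* buf is the uncached alias: CPU stores are visible to the
	 * device without explicit cache maintenance. */
	dma_free_attrs(dev, len, buf, dma, 0);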