xen/arm: simplify dma_cache_maint
index 90574d89d0d4843cbf35b7a1f2190bc3dc2d8e82..2fde161733b01d218b2be81aa8b4ad89a36ec1a0 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -35,64 +35,45 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
        return __get_free_pages(flags, order);
 }
 
-enum dma_cache_op {
-       DMA_UNMAP,
-       DMA_MAP,
-};
 static bool hypercall_cflush = false;
 
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-       size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+/* buffers in highmem or foreign pages cannot cross page boundaries */
+static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
 {
        struct gnttab_cache_flush cflush;
-       unsigned long xen_pfn;
-       size_t left = size;
 
-       xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
-       offset %= XEN_PAGE_SIZE;
+       cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
+       cflush.offset = xen_offset_in_page(handle);
+       cflush.op = op;
 
        do {
-               size_t len = left;
-       
-               /* buffers in highmem or foreign pages cannot cross page
-                * boundaries */
-               if (len + offset > XEN_PAGE_SIZE)
-                       len = XEN_PAGE_SIZE - offset;
-
-               cflush.op = 0;
-               cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
-               cflush.offset = offset;
-               cflush.length = len;
-
-               if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
-                       cflush.op = GNTTAB_CACHE_INVAL;
-               if (op == DMA_MAP) {
-                       if (dir == DMA_FROM_DEVICE)
-                               cflush.op = GNTTAB_CACHE_INVAL;
-                       else
-                               cflush.op = GNTTAB_CACHE_CLEAN;
-               }
-               if (cflush.op)
-                       HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
+               if (size + cflush.offset > XEN_PAGE_SIZE)
+                       cflush.length = XEN_PAGE_SIZE - cflush.offset;
+               else
+                       cflush.length = size;
+
+               HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
-               offset = 0;
-               xen_pfn++;
-               left -= len;
-       } while (left);
+               cflush.offset = 0;
+               cflush.a.dev_bus_addr += cflush.length;
+               size -= cflush.length;
+       } while (size);
 }
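
The rewritten loop clamps each flush at a Xen page boundary and walks the bus address forward, so the old xen_pfn/offset/left bookkeeping disappears. A minimal standalone sketch of the same page-bounded chunking, assuming the 4 KiB XEN_PAGE_SIZE that Linux uses; cache_maint_sketch() and emit_flush() are hypothetical names, the latter standing in for the GNTTABOP_cache_flush hypercall:

#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SIZE 4096UL

/* hypothetical stand-in for HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, ...) */
static void emit_flush(uint64_t page_base, unsigned long offset,
                       unsigned long length)
{
        printf("flush page=%#llx off=%lu len=%lu\n",
               (unsigned long long)page_base, offset, length);
}

static void cache_maint_sketch(uint64_t handle, size_t size)
{
        do {
                unsigned long offset = handle & (XEN_PAGE_SIZE - 1);
                /* clamp so a single flush never crosses a Xen page */
                unsigned long length = size + offset > XEN_PAGE_SIZE ?
                                       XEN_PAGE_SIZE - offset : (unsigned long)size;

                emit_flush(handle & ~(XEN_PAGE_SIZE - 1), offset, length);

                handle += length;       /* later chunks start page-aligned */
                size -= length;
        } while (size);
}

int main(void)
{
        /* 10000 bytes starting 100 bytes into a page: three flushes */
        cache_maint_sketch(0x80000064, 10000);
        return 0;
}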
 
 static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+       if (dir != DMA_TO_DEVICE)
+               dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
 static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
 {
-       dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+       if (dir == DMA_FROM_DEVICE)
+               dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
+       else
+               dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
 }
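
With the dma_cache_op enum gone, the direction-to-op decision moves out of the loop and into the two callers, unchanged in substance: tearing a mapping down (dev_to_cpu) needs at most an invalidate, and skips even that when the device only ever read the buffer; setting one up (cpu_to_dev) invalidates for DMA_FROM_DEVICE and cleans otherwise. A sketch of that mapping, where map_op()/unmap_op() and CFLUSH_* are hypothetical names standing in for the GNTTAB_CACHE_CLEAN/GNTTAB_CACHE_INVAL values from xen/interface/grant_table.h:

#include <assert.h>

/* values mirror enum dma_data_direction in <linux/dma-direction.h> */
enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
};

enum cflush_op { CFLUSH_NONE, CFLUSH_CLEAN, CFLUSH_INVAL }; /* hypothetical */

/* cpu_to_dev (map): device is about to read or write the buffer */
static enum cflush_op map_op(enum dma_data_direction dir)
{
        /* device will write: drop stale CPU lines; otherwise push dirty lines out */
        return dir == DMA_FROM_DEVICE ? CFLUSH_INVAL : CFLUSH_CLEAN;
}

/* dev_to_cpu (unmap): CPU is about to look at the buffer again */
static enum cflush_op unmap_op(enum dma_data_direction dir)
{
        /* nothing to do if the device only ever read the buffer */
        return dir == DMA_TO_DEVICE ? CFLUSH_NONE : CFLUSH_INVAL;
}

int main(void)
{
        assert(map_op(DMA_TO_DEVICE) == CFLUSH_CLEAN);
        assert(unmap_op(DMA_TO_DEVICE) == CFLUSH_NONE);
        assert(map_op(DMA_FROM_DEVICE) == CFLUSH_INVAL);
        assert(unmap_op(DMA_FROM_DEVICE) == CFLUSH_INVAL);
        return 0;
}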
 
 void __xen_dma_map_page(struct device *hwdev, struct page *page,