asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/nds32/kernel/dma.c
nds32: implement the unmap_sg DMA operation
[linux.git] / arch / nds32 / kernel / dma.c
index d291800fc62185bf69be3a7468823ca856ddaccf..b9973317c734d28ee0f0acbfca250588cb39762a 100644 (file)
 static pte_t *consistent_pte;
 static DEFINE_RAW_SPINLOCK(consistent_lock);
 
-enum master_type {
-       FOR_CPU = 0,
-       FOR_DEVICE = 1,
-};
-
 /*
  * VM region handling support.
  *
@@ -333,15 +328,85 @@ static int __init consistent_init(void)
 }
 
 core_initcall(consistent_init);
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
+
+/*
+ * cache_op - apply a cache maintenance routine to a physical range.
+ * @paddr: physical start address of the buffer
+ * @size:  number of bytes to operate on
+ * @fn:    range operation taking (start, end) virtual addresses,
+ *         e.g. cpu_dma_wb_range or cpu_dma_inval_range
+ *
+ * Highmem pages have no permanent kernel mapping, so each such page is
+ * temporarily mapped with kmap_atomic() and the operation is clamped to
+ * the page boundary.  Lowmem is permanently mapped, so it is handled
+ * with a single phys_to_virt() translation.
+ */
+static inline void cache_op(phys_addr_t paddr, size_t size,
+               void (*fn)(unsigned long start, unsigned long end))
+{
+       struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+       unsigned offset = paddr & ~PAGE_MASK;
+       size_t left = size;
+       unsigned long start;
+
+       do {
+               size_t len = left;
+
+               if (PageHighMem(page)) {
+                       void *addr;
+
+                       if (offset + len > PAGE_SIZE) {
+                               /* Normalize offset into this page, then
+                                * clamp the operation to the page end. */
+                               if (offset >= PAGE_SIZE) {
+                                       page += offset >> PAGE_SHIFT;
+                                       offset &= ~PAGE_MASK;
+                               }
+                               len = PAGE_SIZE - offset;
+                       }
+
+                       addr = kmap_atomic(page);
+                       start = (unsigned long)(addr + offset);
+                       fn(start, start + len);
+                       kunmap_atomic(addr);
+               } else {
+                       /*
+                        * NOTE(review): this passes 'size', not 'len'.  That
+                        * is only correct if the lowmem branch can be taken
+                        * solely on the first iteration (left == size), i.e.
+                        * a buffer never transitions from highmem back to
+                        * lowmem as 'page' increases — confirm.
+                        */
+                       start = (unsigned long)phys_to_virt(paddr);
+                       fn(start, start + size);
+               }
+               offset = 0;
+               page++;
+               left -= len;
+       } while (left);
+}
+
+/*
+ * Make one streaming mapping visible to the device: write dirty cache
+ * lines back to memory before the device reads them.  DMA_FROM_DEVICE
+ * needs no write-back here since the device only writes the buffer.
+ */
+static void
+nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+                                size_t size, enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_FROM_DEVICE:
+               break;
+       case DMA_TO_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               cache_op(handle, size, cpu_dma_wb_range);
+               break;
+       default:
+               BUG();
+       }
+}
+
+/*
+ * Hand one streaming mapping back to the CPU: invalidate cache lines
+ * covering the buffer so the CPU re-reads data the device wrote.
+ * DMA_TO_DEVICE needs no invalidate since the device only read it.
+ */
+static void
+nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+                             size_t size, enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_TO_DEVICE:
+               break;
+       case DMA_FROM_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               cache_op(handle, size, cpu_dma_inval_range);
+               break;
+       default:
+               BUG();
+       }
+}
+
 static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
 {
+       dma_addr_t dma_addr = page_to_phys(page) + offset;
+
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
-       return page_to_phys(page) + offset;
+               nds32_dma_sync_single_for_device(dev, handle, size, dir);
+       return dma_addr;
 }
 
 static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
@@ -349,115 +414,55 @@ static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                 unsigned long attrs)
 {
+       /* Invalidate caches so the CPU sees data the device wrote. */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
+               nds32_dma_sync_single_for_cpu(dev, handle, size, dir);
 }
 
-/*
- * Make an area consistent for devices.
- */
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
-{
-       unsigned long start = (unsigned long)vaddr;
-       unsigned long end = start + size;
-
-       if (master_type == FOR_CPU) {
-               switch (direction) {
-               case DMA_TO_DEVICE:
-                       break;
-               case DMA_FROM_DEVICE:
-               case DMA_BIDIRECTIONAL:
-                       cpu_dma_inval_range(start, end);
-                       break;
-               default:
-                       BUG();
-               }
-       } else {
-               /* FOR_DEVICE */
-               switch (direction) {
-               case DMA_FROM_DEVICE:
-                       break;
-               case DMA_TO_DEVICE:
-               case DMA_BIDIRECTIONAL:
-                       cpu_dma_wb_range(start, end);
-                       break;
-               default:
-                       BUG();
-               }
-       }
-}
-
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                           int nents, enum dma_data_direction dir,
-                           unsigned long attrs)
+/*
+ * Write back caches for every entry of an already-mapped scatterlist
+ * so the device observes up-to-date memory contents.  Relies on each
+ * entry's dma_address having been set by the map operation.
+ */
+static void
+nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                            int nents, enum dma_data_direction dir)
 {
        int i;
 
        for (i = 0; i < nents; i++, sg++) {
-               void *virt;
-               unsigned long pfn;
-               struct page *page = sg_page(sg);
-
-               sg->dma_address = sg_phys(sg);
-               pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
-               page = pfn_to_page(pfn);
-               if (PageHighMem(page)) {
-                       virt = kmap_atomic(page);
-                       consistent_sync(virt, sg->length, dir, FOR_CPU);
-                       kunmap_atomic(virt);
-               } else {
-                       if (sg->offset > PAGE_SIZE)
-                               panic("sg->offset:%08x > PAGE_SIZE\n",
-                                     sg->offset);
-                       virt = page_address(page) + sg->offset;
-                       consistent_sync(virt, sg->length, dir, FOR_CPU);
-               }
+               nds32_dma_sync_single_for_device(dev, sg_dma_address(sg),
+                               sg->length, dir);
        }
-       return nents;
-}
-
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                              int nhwentries, enum dma_data_direction dir,
-                              unsigned long attrs)
-{
 }
 
+/*
+ * Invalidate caches for every entry of a mapped scatterlist so the CPU
+ * reads data freshly written by the device.
+ */
 static void
-nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-                             size_t size, enum dma_data_direction dir)
+nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+                         enum dma_data_direction dir)
 {
-       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
-}
+       int i;
 
-static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-                                size_t size, enum dma_data_direction dir)
-{
-       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
+       for (i = 0; i < nents; i++, sg++) {
+               nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                               sg->length, dir);
+       }
 }
 
-static void
-nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-                         enum dma_data_direction dir)
+/*
+ * Map a scatterlist for streaming DMA.  Each entry's bus address is
+ * its physical address (no IOMMU); caches are written back per entry
+ * unless the caller asked to skip CPU syncing.  Always succeeds and
+ * returns nents.
+ */
+static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                           int nents, enum dma_data_direction dir,
+                           unsigned long attrs)
 {
        int i;
 
        for (i = 0; i < nents; i++, sg++) {
-               char *virt =
-                   page_address((struct page *)sg->page_link) + sg->offset;
-               consistent_sync(virt, sg->length, dir, FOR_CPU);
+               /*
+                * sg_dma_address() is only meaningful once dma_address is
+                * assigned; the pre-refactor code set it here, so restore
+                * that — otherwise the sync below uses an unset address
+                * and callers never receive a bus address.
+                */
+               sg->dma_address = sg_phys(sg);
+               if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+                       nds32_dma_sync_single_for_device(dev,
+                                       sg_dma_address(sg), sg->length, dir);
        }
+       return nents;
 }
 
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                            int nents, enum dma_data_direction dir)
+/*
+ * Unmap a scatterlist: no address-translation teardown is needed, only
+ * a cache invalidate per entry so the CPU sees device-written data.
+ * Honour DMA_ATTR_SKIP_CPU_SYNC for consistency with
+ * nds32_dma_unmap_page().
+ */
+static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                              int nhwentries, enum dma_data_direction dir,
+                              unsigned long attrs)
 {
        int i;
 
-       for (i = 0; i < nents; i++, sg++) {
-               char *virt =
-                   page_address((struct page *)sg->page_link) + sg->offset;
-               consistent_sync(virt, sg->length, dir, FOR_DEVICE);
+       if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+               return;
+
+       for (i = 0; i < nhwentries; i++, sg++) {
+               nds32_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                               sg->length, dir);
        }
 }