From ff92b9e3c9f85fa442c430d70bf075499e1193b7 Mon Sep 17 00:00:00 2001
From: Phil Elwell
Date: Thu, 4 May 2017 10:58:20 +0100
Subject: [PATCH] staging: vc04_services: Fix bulk cache maintenance

vchiq_arm supports transfers less than one page and at arbitrary
alignment, using the dma-mapping API to perform its cache maintenance
(even though the VPU drives the DMA hardware). Read (DMA_FROM_DEVICE)
operations use cache invalidation for speed, falling back to
clean+invalidate on partial cache lines, with writes (DMA_TO_DEVICE)
using flushes.

If a read transfer has ends which aren't page-aligned, performing cache
maintenance as if they were whole pages can lead to memory corruption
since the partial cache lines at the ends (and any cache lines before
or after the transfer area) will be invalidated. This bug was masked
until the disabling of the cache flush in flush_dcache_page().

Honouring the requested transfer start- and end-points prevents the
corruption.

Fixes: cf9caf192988 ("staging: vc04_services: Replace dmac_map_area with dmac_map_sg")
Signed-off-by: Phil Elwell
Cc: stable # 4.10
Reported-by: Stefan Wahren
Tested-by: Stefan Wahren
Signed-off-by: Greg Kroah-Hartman
---
 .../interface/vchiq_arm/vchiq_2835_arm.c      | 31 ++++++++++++-------
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 988ee61fb4a7..d04db3f55519 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -502,8 +502,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 	 */
 	sg_init_table(scatterlist, num_pages);
 	/* Now set the pages for each scatterlist */
-	for (i = 0; i < num_pages; i++)
-		sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
+	for (i = 0; i < num_pages; i++) {
+		unsigned int len = PAGE_SIZE - offset;
+
+		if (len > count)
+			len = count;
+		sg_set_page(scatterlist + i, pages[i], len, offset);
+		offset = 0;
+		count -= len;
+	}
 
 	dma_buffers = dma_map_sg(g_dev,
 				 scatterlist,
@@ -524,20 +531,20 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		u32 addr = sg_dma_address(sg);
 
 		/* Note: addrs is the address + page_count - 1
-		 * The firmware expects the block to be page
+		 * The firmware expects blocks after the first to be page-
 		 * aligned and a multiple of the page size
 		 */
 		WARN_ON(len == 0);
-		WARN_ON(len & ~PAGE_MASK);
-		WARN_ON(addr & ~PAGE_MASK);
+		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
+		WARN_ON(i && (addr & ~PAGE_MASK));
 		if (k > 0 &&
-		    ((addrs[k - 1] & PAGE_MASK) |
-		     ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
-		    == addr) {
-			addrs[k - 1] += (len >> PAGE_SHIFT);
-		} else {
-			addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
-		}
+		    ((addrs[k - 1] & PAGE_MASK) +
+		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
+		    == (addr & PAGE_MASK))
+			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
+		else
+			addrs[k++] = (addr & PAGE_MASK) |
+				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
 	}
 
 	/* Partial cache lines (fragments) require special measures */
-- 
2.45.2
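
As a worked illustration of the new scatterlist setup above, the following
stand-alone user-space sketch repeats the per-entry length calculation from
create_pagelist(). The 4 KiB PAGE_SIZE, the example offset and count values,
and the main() harness are assumptions made purely for demonstration; this
is not driver code.

/*
 * Stand-alone sketch (not part of the driver): mimics the per-entry
 * length calculation added to create_pagelist() above, assuming 4 KiB
 * pages. A transfer that starts 100 bytes into its first page and is
 * 5000 bytes long is split so each scatterlist entry describes only
 * the bytes actually transferred, keeping later cache maintenance
 * inside the requested start- and end-points.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int offset = 100;	/* example: buf starts 100 bytes into a page */
	unsigned int count = 5000;	/* example: total transfer length in bytes */
	unsigned int num_pages = (offset + count + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		printf("sg[%u]: offset=%u len=%u\n", i, offset, len);
		offset = 0;
		count -= len;
	}
	return 0;
}

With those example values the sketch prints two entries, sg[0] with offset
100 and length 3996 and sg[1] with offset 0 and length 1004, rather than
describing two whole 4096-byte pages as the pre-patch loop would.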