powerpc/32: Add warning on misaligned copy_page() or clear_page()
author		Christophe Leroy <christophe.leroy@c-s.fr>
		Fri, 16 Aug 2019 07:52:20 +0000 (07:52 +0000)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Tue, 20 Aug 2019 11:22:15 +0000 (21:22 +1000)
copy_page() and clear_page() expect a page-aligned destination, and
use the dcbz instruction to clear entire cache lines on the
assumption that the destination is cache aligned.
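
To illustrate the assumption, here is a minimal C sketch (not part of the
patch; the buffer and function names are hypothetical, and dcbz() is assumed
to be the same helper clear_page() uses): dcbz zeroes the entire L1 cache
line containing its argument, so a destination that is not cache-line
aligned silently clears bytes that precede it.

	/* Hypothetical sketch only, not code from this patch. */
	#include <asm/cache.h>	/* L1_CACHE_BYTES; dcbz() location assumed */

	static char buf[2 * L1_CACHE_BYTES]
			__attribute__((__aligned__(L1_CACHE_BYTES)));

	static void dcbz_misaligned_sketch(void)
	{
		void *dst = buf + 16;	/* NOT cache-line aligned */

		/*
		 * Zeroes buf[0 .. L1_CACHE_BYTES - 1], i.e. also the 16
		 * bytes *before* dst that happen to share the cache line.
		 */
		dcbz(dst);
	}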

As shown during the analysis of a bug in the BTRFS filesystem, a
misaligned copy_page() can create bugs that are difficult to locate (see Link).

Add an explicit WARNING when copy_page() or clear_page() is called
with a misaligned destination.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=204371
Link: https://lore.kernel.org/r/c6cea38f90480268d439ca44a645647e260fff09.1565941808.git.christophe.leroy@c-s.fr
arch/powerpc/include/asm/page_32.h
arch/powerpc/kernel/misc_32.S

diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 683dfbc67ca8f555bc1e6fbee3689b5a39a418d5..d64dfe3ac712469b183a4430e5fb2e15843c957a 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -40,6 +40,8 @@ typedef unsigned long long pte_basic_t;
 typedef unsigned long pte_basic_t;
 #endif
 
+#include <asm/bug.h>
+
 /*
  * Clear page using the dcbz instruction, which doesn't cause any
  * memory traffic (except to write out any cache lines which get
@@ -49,6 +51,8 @@ static inline void clear_page(void *addr)
 {
        unsigned int i;
 
+       WARN_ON((unsigned long)addr & (L1_CACHE_BYTES - 1));
+
        for (i = 0; i < PAGE_SIZE / L1_CACHE_BYTES; i++, addr += L1_CACHE_BYTES)
                dcbz(addr);
 }
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index fe4bd321730e163bc901ebf329c8f6f980b4f8a5..02d90e1ebf65b4d4f1e4bcb62e4b595e509fb782 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -452,7 +452,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
        stwu    r9,16(r3)
 
 _GLOBAL(copy_page)
+       rlwinm  r5, r3, 0, L1_CACHE_BYTES - 1
        addi    r3,r3,-4
+
+0:     twnei   r5, 0   /* WARN if r3 is not cache aligned */
+       EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+
        addi    r4,r4,-4
 
        li      r5,4
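
For reference, the three instructions added to copy_page() implement the same
check as the WARN_ON() added to clear_page() above, just in assembly: rlwinm
masks the low alignment bits of the destination (r3) into r5, twnei traps when
they are non-zero, and EMIT_BUG_ENTRY tags that trap site with BUGFLAG_WARNING
so the trap handler prints a warning and lets execution continue. A rough C
equivalent (with dest standing in for r3) would be:

	WARN_ON((unsigned long)dest & (L1_CACHE_BYTES - 1));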