Merge MIPS prerequisites
author    James Hogan <james.hogan@imgtec.com>
          Thu, 2 Feb 2017 13:50:39 +0000 (13:50 +0000)
committer James Hogan <james.hogan@imgtec.com>
          Fri, 3 Feb 2017 15:20:40 +0000 (15:20 +0000)
Merge in MIPS prerequisites from the GVA page tables and GPA page tables
series. The same branch can also be merged into the MIPS tree.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
arch/mips/include/asm/pgalloc.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/tlbex.h [new file with mode: 0644]
arch/mips/include/asm/uasm.h
arch/mips/mm/Makefile
arch/mips/mm/init.c
arch/mips/mm/pgtable-64.c
arch/mips/mm/pgtable.c [new file with mode: 0644]
arch/mips/mm/tlbex.c

diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a03e86969f78a86a9989897ad95cfad1b7798d73..a8705f6c81808f786076d93909d3a34fc2f637d5 100644
@@ -43,21 +43,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  * Initialize a new pgd / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-       pgd_t *ret, *init;
-
-       ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
-       if (ret) {
-               init = pgd_offset(&init_mm, 0UL);
-               pgd_init((unsigned long)ret);
-               memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
-                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-       }
-
-       return ret;
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index b42b513007a2c3da70480fd7dd86d990bb0bc874..7227c158cbf809697ea9d3cd95907421b0d3fe19 100644
@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
 }
 
 #define protected_cache_op(op,addr)                            \
+({                                                             \
+       int __err = 0;                                          \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set "MIPS_ISA_ARCH_LEVEL"      \n"             \
-       "1:     cache   %0, (%1)                \n"             \
+       "1:     cache   %1, (%2)                \n"             \
        "2:     .set    pop                     \n"             \
+       "       .section .fixup,\"ax\"          \n"             \
+       "3:     li      %0, %3                  \n"             \
+       "       j       2b                      \n"             \
+       "       .previous                       \n"             \
        "       .section __ex_table,\"a\"       \n"             \
-       "       "STR(PTR)" 1b, 2b               \n"             \
+       "       "STR(PTR)" 1b, 3b               \n"             \
        "       .previous"                                      \
-       :                                                       \
-       : "i" (op), "r" (addr))
+       : "+r" (__err)                                          \
+       : "i" (op), "r" (addr), "i" (-EFAULT));                 \
+       __err;                                                  \
+})
+
 
 #define protected_cachee_op(op,addr)                           \
+({                                                             \
+       int __err = 0;                                          \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    mips0                   \n"             \
        "       .set    eva                     \n"             \
-       "1:     cachee  %0, (%1)                \n"             \
+       "1:     cachee  %1, (%2)                \n"             \
        "2:     .set    pop                     \n"             \
+       "       .section .fixup,\"ax\"          \n"             \
+       "3:     li      %0, %3                  \n"             \
+       "       j       2b                      \n"             \
+       "       .previous                       \n"             \
        "       .section __ex_table,\"a\"       \n"             \
-       "       "STR(PTR)" 1b, 2b               \n"             \
+       "       "STR(PTR)" 1b, 3b               \n"             \
        "       .previous"                                      \
-       :                                                       \
-       : "i" (op), "r" (addr))
+       : "+r" (__err)                                          \
+       : "i" (op), "r" (addr), "i" (-EFAULT));                 \
+       __err;                                                  \
+})
 
 /*
  * The next two are for badland addresses like signal trampolines.
  */
-static inline void protected_flush_icache_line(unsigned long addr)
+static inline int protected_flush_icache_line(unsigned long addr)
 {
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
-               protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
-               break;
+               return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 
        default:
 #ifdef CONFIG_EVA
-               protected_cachee_op(Hit_Invalidate_I, addr);
+               return protected_cachee_op(Hit_Invalidate_I, addr);
 #else
-               protected_cache_op(Hit_Invalidate_I, addr);
+               return protected_cache_op(Hit_Invalidate_I, addr);
 #endif
-               break;
        }
 }
 
@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * caches.  We're talking about one cacheline unnecessarily getting invalidated
  * here so the penalty isn't overly hard.
  */
-static inline void protected_writeback_dcache_line(unsigned long addr)
+static inline int protected_writeback_dcache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-       protected_cachee_op(Hit_Writeback_Inv_D, addr);
+       return protected_cachee_op(Hit_Writeback_Inv_D, addr);
 #else
-       protected_cache_op(Hit_Writeback_Inv_D, addr);
+       return protected_cache_op(Hit_Writeback_Inv_D, addr);
 #endif
 }
 
-static inline void protected_writeback_scache_line(unsigned long addr)
+static inline int protected_writeback_scache_line(unsigned long addr)
 {
 #ifdef CONFIG_EVA
-       protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+       return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
 #else
-       protected_cache_op(Hit_Writeback_Inv_SD, addr);
+       return protected_cache_op(Hit_Writeback_Inv_SD, addr);
 #endif
 }
 
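The protected_* helpers above now return an error code from the new fixup
path instead of silently skipping past a faulting cache op. A minimal caller
sketch, assuming a hypothetical helper name (in-tree callers are converted
elsewhere in the series, not in this diff):

	/* Illustrative only: write back one D-cache line and invalidate the
	 * matching I-cache line at a possibly-faulting user address,
	 * propagating the -EFAULT produced by the fixup path added above. */
	static int sync_icache_user_line(unsigned long addr)
	{
		int err;

		err = protected_writeback_dcache_line(addr);
		if (err)
			return err;
		return protected_flush_icache_line(addr);
	}
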
diff --git a/arch/mips/include/asm/tlbex.h b/arch/mips/include/asm/tlbex.h
new file mode 100644
index 0000000..53050e9
--- /dev/null
+++ b/arch/mips/include/asm/tlbex.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+       tlb_random,
+       tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+                     unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+                          struct uasm_reloc **r,
+                          enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
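The new header exposes the uasm-based page table walker builders that
tlbex.c stops keeping static below. A sketch of how an external consumer
might emit a 64-bit refill sequence into its own buffer; the buffer sizes,
the local K0/K1 definitions, and the surrounding function are assumptions,
not part of this patch (uasm_i_eret and uasm_resolve_relocs are
pre-existing uasm APIs):

	#include <linux/string.h>
	#include <asm/tlbex.h>
	#include <asm/uasm.h>

	/* k0/k1 scratch registers by MIPS convention; assumed here. */
	#define K0 26
	#define K1 27

	static u32 refill_buf[256];

	static void build_example_refill(void)
	{
		struct uasm_label labels[8], *l = labels;
		struct uasm_reloc relocs[8], *r = relocs;
		u32 *p = refill_buf;

		memset(labels, 0, sizeof(labels));
		memset(relocs, 0, sizeof(relocs));

		build_get_pmde64(&p, &l, &r, K0, K1); /* ptr <- pmd entry */
		build_get_ptep(&p, K0, K1);           /* ptr <- pte pointer */
		build_update_entries(&p, K0, K1);     /* load EntryLo0/EntryLo1 */
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_i_eret(&p);                      /* return from exception */

		uasm_resolve_relocs(relocs, labels);
	}
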
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f7929f65f7ca27bf4926deb8e51d73d724c935bb..e9a9e2ade1d216db0d0260b2a796b50b334518e3 100644
@@ -9,6 +9,9 @@
  * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
+#ifndef __ASM_UASM_H
+#define __ASM_UASM_H
+
 #include <linux/types.h>
 
 #ifdef CONFIG_EXPORT_UASM
@@ -309,3 +312,5 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
                 unsigned int reg2, int lid);
 void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+
+#endif /* __ASM_UASM_H */
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index b4c64bd3f723903296c73d75dba3c3e29b861591..b4cc8811a6642b35901e4bcc9a277b657801d8fb 100644
@@ -4,7 +4,7 @@
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
                                   gup.o init.o mmap.o page.o page-funcs.o \
-                                  tlbex.o tlbex-fault.o tlb-funcs.o
+                                  pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o
 
 ifdef CONFIG_CPU_MICROMIPS
 obj-y                          += uasm-micromips.o
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index e86ebcf5c071f8c9d9737b9d1d1e21cd95204212..653569bc0da73a5f068c2079641e27e26753a562 100644
@@ -538,5 +538,6 @@ unsigned long pgd_current[NR_CPUS];
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL_GPL(invalid_pmd_table);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ce4473e7c0d261b04d7bf44fcfc8ddc6414435d9..0ae7b28b4db5d992c11a79e05c76b39cb9a41dcf 100644
@@ -6,6 +6,7 @@
  * Copyright (C) 1999, 2000 by Silicon Graphics
  * Copyright (C) 2003 by Ralf Baechle
  */
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <asm/fixmap.h>
@@ -60,6 +61,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
                p[-1] = pagetable;
        } while (p != end);
 }
+EXPORT_SYMBOL_GPL(pmd_init);
 #endif
 
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c
new file mode 100644
index 0000000..05560b0
--- /dev/null
+++ b/arch/mips/mm/pgtable.c
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *ret, *init;
+
+       ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+       if (ret) {
+               init = pgd_offset(&init_mm, 0UL);
+               pgd_init((unsigned long)ret);
+               memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
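Moving pgd_alloc() out of line enables the EXPORT_SYMBOL_GPL() above, so
modular code can allocate a fully initialised page directory. An
illustrative module-side sketch; the function names and the setup/teardown
framing are assumptions, not part of this diff:

	#include <linux/errno.h>
	#include <asm/pgalloc.h>

	static pgd_t *guest_pgd;

	static int example_setup(struct mm_struct *mm)
	{
		/* pgd_init()ed, with kernel entries copied from init_mm */
		guest_pgd = pgd_alloc(mm);
		if (!guest_pgd)
			return -ENOMEM;
		return 0;
	}

	static void example_teardown(struct mm_struct *mm)
	{
		pgd_free(mm, guest_pgd); /* inline wrapper around free_pages() */
		guest_pgd = NULL;
	}
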
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 55ce39606cb8781d23e3c8f3b6df1273b2ee7a5a..2465f83c79c3b8a229b01753d837e8742d9d6531 100644
@@ -22,6 +22,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/smp.h>
@@ -34,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
 
@@ -344,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -496,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
        }
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-                                 struct uasm_reloc **r,
-                                 enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+                          struct uasm_reloc **r,
+                          enum tlb_write_entry wmode)
 {
        void(*tlbw)(u32 **) = NULL;
 
@@ -627,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                break;
        }
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                                                        unsigned int reg)
@@ -781,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-                unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+                     unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
@@ -859,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -934,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
  */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
        if (pgd_reg != -1) {
                /* pgd is in pgd_reg */
@@ -960,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
 
@@ -989,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
        uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
        /*
         * Bug workaround for the Nevada. It seems as if under certain
@@ -1013,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
        build_adjust_context(p, tmp);
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
        int pte_off_even = 0;
        int pte_off_odd = sizeof(pte_t);
@@ -1063,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
                UASM_i_MTC0(p, 0, C0_ENTRYLO1);
        UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
        int huge_pte;
@@ -1536,7 +1536,9 @@ static void build_loongson3_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_start[];
+extern u32 tlbmiss_handler_setup_pgd[];
+EXPORT_SYMBOL_GPL(tlbmiss_handler_setup_pgd);
 extern u32 tlbmiss_handler_setup_pgd_end[];
 
 static void build_setup_pgd(void)