x86/mm: Check for pfn instead of page in vmalloc_sync_one()
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 794f364cb8829ef5a719cafbc26766e68d08ca9e..e64173db497015083e1f8ed3fd231614d4a64212 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
        return 0;
 }
 
-static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
-{
-       if (!kprobes_built_in())
-               return 0;
-       if (user_mode(regs))
-               return 0;
-       /*
-        * To be potentially processing a kprobe fault and to be allowed to call
-        * kprobe_running(), we have to be non-preemptible.
-        */
-       if (preemptible())
-               return 0;
-       if (!kprobe_running())
-               return 0;
-       return kprobe_fault_handler(regs, X86_TRAP_PF);
-}
-
 /*
  * Prefetch quirks:
  *
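
The x86-private kprobes_fault() helper goes away because none of its logic is
architecture-specific. A minimal sketch of the generic replacement, assuming
it keeps the same checks as the removed helper (the real definition lives in
include/linux/kprobes.h):

static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
					      unsigned int trap)
{
	if (!kprobes_built_in() || user_mode(regs))
		return false;
	/*
	 * To be potentially processing a kprobe fault and to be allowed to
	 * call kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return false;
	if (!kprobe_running())
		return false;
	return kprobe_fault_handler(regs, trap);
}

Callers now pass the architecture's trap number explicitly, here X86_TRAP_PF,
as the two call-site hunks further down show.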
@@ -200,7 +183,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else
-               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+               BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
        return pmd_k;
 }
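
Comparing page-frame numbers instead of struct page pointers drops the
dependency on pfn_to_page(): the pfn comes straight from the raw page-table
entry, so the consistency check also works before the memmap backing
struct page has been initialized. A simplified sketch of the two x86
accessors (the real pmd_pfn() masks the flag bits via pmd_pfn_mask()):

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	/* The pfn is encoded in the entry itself; no struct page needed. */
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

/* pmd_page() additionally requires a valid memmap entry for that pfn. */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))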
@@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
                return;
 
        /* kprobes don't want to hook the spurious faults: */
-       if (kprobes_fault(regs))
+       if (kprobe_page_fault(regs, X86_TRAP_PF))
                return;
 
        /*
@@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
        mm = tsk->mm;
 
        /* kprobes don't want to hook the spurious faults: */
-       if (unlikely(kprobes_fault(regs)))
+       if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
                return;
 
        /*
@@ -1507,9 +1490,8 @@ void do_user_addr_fault(struct pt_regs *regs,
 NOKPROBE_SYMBOL(do_user_addr_fault);
 
 /*
- * This routine handles page faults.  It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
+ * Explicitly marked noinline such that the function tracer sees this as the
+ * page_fault entry point.
  */
 static noinline void
 __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
@@ -1528,33 +1510,26 @@ __do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
 }
 NOKPROBE_SYMBOL(__do_page_fault);
 
-static nokprobe_inline void
-trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
-                        unsigned long error_code)
+static __always_inline void
+trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
+                        unsigned long address)
 {
+       if (!trace_pagefault_enabled())
+               return;
+
        if (user_mode(regs))
                trace_page_fault_user(address, regs, error_code);
        else
                trace_page_fault_kernel(address, regs, error_code);
 }
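
With the enabled check folded into the helper and the helper marked
__always_inline, the fast path at each call site reduces to a single
static-branch test. A sketch of that guard, assuming the usual static-key
pattern from the x86 trace headers (arch/x86/include/asm/trace/common.h):

DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);

static inline bool trace_pagefault_enabled(void)
{
	/* Patched to a jump/nop at runtime; near-zero cost when disabled. */
	return static_branch_unlikely(&trace_pagefault_key);
}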
 
-/*
- * We must have this function blacklisted from kprobes, tagged with notrace
- * and call read_cr2() before calling anything else. To avoid calling any
- * kind of tracing machinery before we've observed the CR2 value.
- *
- * exception_{enter,exit}() contains all sorts of tracepoints.
- */
-dotraplinkage void notrace
-do_page_fault(struct pt_regs *regs, unsigned long error_code)
+dotraplinkage void
+do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 {
-       unsigned long address = read_cr2(); /* Get the faulting address */
        enum ctx_state prev_state;
 
        prev_state = exception_enter();
-       if (trace_pagefault_enabled())
-               trace_page_fault_entries(address, regs, error_code);
-
+       trace_page_fault_entries(regs, error_code, address);
        __do_page_fault(regs, error_code, address);
        exception_exit(prev_state);
 }
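
Since the faulting address is now passed in from the low-level entry code,
nothing that runs before do_page_fault() (tracing, kprobes, the
exception_enter() machinery) can clobber CR2 first. A sketch of how the
64-bit entry stub might wire this up, assuming an idtentry macro extended
with a read_cr2 flag (entry_64.S):

idtentry page_fault	do_page_fault	has_error_code=1	read_cr2=1

Under that assumption, the macro variant reads CR2 immediately on entry and
hands it to the handler as its third argument, before any instrumentation
can run.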