x86/mm: Implement PCID based optimization: try to preserve old TLB entries using PCID
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea9875109b79b70a17ea6c130d8803ff207..65ae17d45c4a262087424c77d51bbd204a5dd2b7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_KMEMCHECK)
        /*
         * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
         * use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
-       if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+       if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
                page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+       else
+               direct_gbpages = 0;
 
        /* Enable PSE if available */
        if (boot_cpu_has(X86_FEATURE_PSE))
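The first hunk replaces a preprocessor #if with an IS_ENABLED() test: both arms of the if are now always parsed and type-checked, and the compiler discards the dead one, which is also why the new else clause clearing direct_gbpages can live in ordinary C. Below is a minimal, self-contained sketch of the trick <linux/kconfig.h> uses for the built-in case (the real macro additionally handles =m module symbols); CONFIG_FOO is a made-up stand-in for a Kconfig option:

	#include <stdio.h>

	#define CONFIG_FOO 1	/* Kconfig defines enabled bool options to 1 */

	/*
	 * Simplified IS_ENABLED(): expands to 1 if the option macro is
	 * defined to 1, and to 0 if it is not defined at all.
	 */
	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)
	#define IS_ENABLED(option) __is_defined(option)

	int main(void)
	{
		/*
		 * Unlike #ifdef, the dead branch is still compiled and
		 * type-checked before the optimizer throws it away.
		 */
		if (IS_ENABLED(CONFIG_FOO))
			printf("option built in\n");
		else
			printf("option disabled\n");
		return 0;
	}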
@@ -811,10 +811,8 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
-       .active_mm = &init_mm,
-       .state = 0,
-#endif
+       .loaded_mm = &init_mm,
+       .next_asid = 1,
        .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
 };
 EXPORT_SYMBOL_GPL(cpu_tlbstate);
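The second hunk is the PCID groundwork named in the commit title: the old SMP-only active_mm/state pair gives way to the mm actually loaded in CR3 plus a round-robin cursor over a small set of ASID slots, started at 1 because slot 0 is occupied by init_mm at boot. The sketch below is a toy userspace model of the slot-recycling idea only; NR_SLOTS, slot_owner and switch_to_mm() are invented names, and the kernel's real selection logic in arch/x86/mm/tlb.c additionally compares context IDs and TLB generation counters before deciding whether a reused slot must be flushed:

	#include <stdio.h>

	#define NR_SLOTS 6	/* illustrative; stands in for TLB_NR_DYN_ASIDS */

	struct mm { int id; };	/* stands in for struct mm_struct */

	struct tlb_model {
		const struct mm *loaded_mm;
		unsigned int next_asid;			/* round-robin eviction cursor */
		const struct mm *slot_owner[NR_SLOTS];	/* which mm owns each ASID */
	};

	/*
	 * Switch to @mm: if an ASID slot already belongs to it, reuse that
	 * slot and keep its cached translations; otherwise steal the next
	 * slot and flush, because it still holds entries for another mm.
	 */
	static unsigned int switch_to_mm(struct tlb_model *t, const struct mm *mm)
	{
		unsigned int asid;

		for (asid = 0; asid < NR_SLOTS; asid++) {
			if (t->slot_owner[asid] == mm) {
				t->loaded_mm = mm;
				printf("mm %d: reuse ASID %u, no flush\n", mm->id, asid);
				return asid;
			}
		}

		asid = t->next_asid;
		t->next_asid = (t->next_asid + 1) % NR_SLOTS;
		t->slot_owner[asid] = mm;
		t->loaded_mm = mm;
		printf("mm %d: new ASID %u, flush needed\n", mm->id, asid);
		return asid;
	}

	int main(void)
	{
		static const struct mm init_mm = { 0 };
		struct mm a = { 1 }, b = { 2 };
		struct tlb_model t = {
			.loaded_mm = &init_mm,
			.next_asid = 1,		/* mirrors the diff: slot 0 is taken */
			.slot_owner = { &init_mm },
		};

		switch_to_mm(&t, &a);	/* new ASID 1, flush */
		switch_to_mm(&t, &b);	/* new ASID 2, flush */
		switch_to_mm(&t, &a);	/* ASID 1 reused, TLB entries preserved */
		return 0;
	}

Switching back to mm a in the last call finds its old slot and skips the flush; that preserved TLB state is the whole payoff of tracking per-mm ASIDs instead of flushing on every CR3 write.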