x86-32, hibernate: Set up temporary text mapping for 32bit system
author    Zhimin Gu <kookoo.gu@intel.com>
          Fri, 21 Sep 2018 06:28:32 +0000 (14:28 +0800)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Wed, 3 Oct 2018 09:56:34 +0000 (11:56 +0200)
Set up the temporary text mapping for the final jump address
so that the system can jump to the right address after all
the pages have been copied back to their original addresses;
otherwise the final mapping for the jump address is invalid.

Analogous changes were made for 64-bit in commit 65c0554b73c9
(x86/power/64: Fix kernel text mapping corruption during image
restoration).

Signed-off-by: Zhimin Gu <kookoo.gu@intel.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
arch/x86/power/hibernate.c
arch/x86/power/hibernate_32.c
arch/x86/power/hibernate_asm_32.S

diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 7383cb67ffd71f90b68b55042914305bbd2a4cb2..bcddf09b5aa3912a63ccdaba7a248c4f70960792 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -157,10 +157,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
        rdr->magic = RESTORE_MAGIC;
-#ifdef CONFIG_X86_64
        rdr->jump_address = (unsigned long)restore_registers;
        rdr->jump_address_phys = __pa_symbol(restore_registers);
-#endif
 
        /*
         * The restore code fixes up CR3 and CR4 in the following sequence:
@@ -198,10 +196,8 @@ int arch_hibernation_header_restore(void *addr)
                return -EINVAL;
        }
 
-#ifdef CONFIG_X86_64
        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
-#endif
        restore_cr3 = rdr->cr3;
 
        if (hibernation_e820_mismatch(rdr->e820_digest)) {
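
For reference, a simplified sketch of the header record that the two
hunks above now fill in and read back unconditionally on 32-bit as
well; the field order and types are approximations inferred from the
assignments shown here, not copied from the kernel source:

struct restore_data_record {
        unsigned long magic;                    /* RESTORE_MAGIC sanity check */
        unsigned long jump_address;             /* virtual address of restore_registers */
        unsigned long jump_address_phys;        /* physical address of restore_registers */
        unsigned long cr3;                      /* image kernel's page tables */
        unsigned char e820_digest[16];          /* e820 map digest (size assumed) */
};
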
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index a9861095fbb85a2bf823a7bda562192ae342f5ad..15695e30f982e633ca7c74a605ff08927caec888 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -143,6 +143,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
 #endif
 }
 
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = pgd_base + pgd_index(restore_jump_address);
+
+       pmd = resume_one_md_table_init(pgd);
+       if (!pmd)
+               return -ENOMEM;
+
+       if (boot_cpu_has(X86_FEATURE_PSE)) {
+               set_pmd(pmd + pmd_index(restore_jump_address),
+                       __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+       } else {
+               pte = resume_one_page_table_init(pmd);
+               if (!pte)
+                       return -ENOMEM;
+               set_pte(pte + pte_index(restore_jump_address),
+                       __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+       }
+
+       return 0;
+}
+
 asmlinkage int swsusp_arch_resume(void)
 {
        int error;
@@ -152,6 +178,11 @@ asmlinkage int swsusp_arch_resume(void)
                return -ENOMEM;
 
        resume_init_first_level_page_table(resume_pg_dir);
+
+       error = set_up_temporary_text_mapping(resume_pg_dir);
+       if (error)
+               return error;
+
        error = resume_physical_mapping_init(resume_pg_dir);
        if (error)
                return error;
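
As an aside on how set_up_temporary_text_mapping() picks its slots:
without PAE, 32-bit x86 splits a virtual address 10/10/12 into a page
directory index, a page table index, and a page offset (with PAE the
split is 2/9/9/12 and the PMD level is real rather than folded). A
standalone illustration of that arithmetic; the demo_* names, the
constants, and the sample address are assumptions for demonstration,
not kernel code:

#include <stdio.h>

/* Non-PAE split: 10-bit directory index, 10-bit table index, 12-bit
 * page offset. Everything below is made up for this demo. */
#define DEMO_PGDIR_SHIFT        22
#define DEMO_PAGE_SHIFT         12
#define DEMO_PTRS_PER_PGD       1024
#define DEMO_PTRS_PER_PTE       1024

static unsigned int demo_pgd_index(unsigned long addr)
{
        return (addr >> DEMO_PGDIR_SHIFT) & (DEMO_PTRS_PER_PGD - 1);
}

static unsigned int demo_pte_index(unsigned long addr)
{
        return (addr >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PTE - 1);
}

int main(void)
{
        unsigned long restore_jump_address = 0xc11a4000UL; /* sample only */

        printf("pgd slot %u, pte slot %u\n",
               demo_pgd_index(restore_jump_address),
               demo_pte_index(restore_jump_address));
        return 0;
}
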
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index e9adda6b6b022640127947b40f0876e92877a9a7..01f653fae7bd48ee41a0713aa7e8388ae57f3c31 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -36,6 +36,8 @@ ENTRY(swsusp_arch_suspend)
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
+       /* prepare to jump to the image kernel */
+       movl    restore_jump_address, %ebx
        movl    restore_cr3, %ebp
 
        movl    mmu_cr4_features, %ecx
@@ -74,6 +76,7 @@ copy_loop:
        .p2align 4,,7
 
 done:
+       jmpl    *%ebx
 
        /* code below belongs to the image kernel */
        .align PAGE_SIZE
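
One detail worth noting in restore_image: restore_jump_address is
latched into %ebx before the copy loop runs, because the loop may
overwrite the very page that holds the variable; only the register
copy is guaranteed to survive until the final jmpl. A rough C analogy
of that ordering (the variable, its value, and the clobbering step
are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Stand-in for the boot kernel's copy of the jump target. */
static unsigned long restore_jump_address = 0xc1000000UL; /* made up */

int main(void)
{
        /* Latch first, like "movl restore_jump_address, %ebx". */
        unsigned long target = restore_jump_address;

        /* The page copy may clobber the variable's own memory... */
        memset(&restore_jump_address, 0, sizeof(restore_jump_address));

        /* ...but the latched value still names the right place,
         * like "jmpl *%ebx". */
        printf("jumping to %#lx\n", target);
        return 0;
}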