/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

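/*
 * Physical address of the temporary top-level page table; the asm restore
 * code in hibernate_asm_64.S loads it into CR3 before copying the image.
 */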
unsigned long temp_level4_pgt __visible;

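/*
 * Address of the safe page that core_restore_code is copied to, so that
 * the restore path can keep running while the rest of memory is rewritten.
 */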
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

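	/*
	 * Map the 2 MB large page containing jump_address_phys, executable,
	 * at the image kernel's virtual address of its entry point
	 * (restore_jump_address).
	 */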
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		set_p4d(p4d + p4d_index(restore_jump_address),
			__p4d(__pa(pud) | _KERNPG_TABLE));
		set_pgd(pgd + pgd_index(restore_jump_address),
			__pgd(__pa(p4d) | _KERNPG_TABLE));
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		set_pgd(pgd + pgd_index(restore_jump_address),
			__pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

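/*
 * Page-table allocation callback for kernel_ident_mapping_init(); pages
 * obtained with get_safe_page() are guaranteed not to collide with the
 * image data being restored.
 */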
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);

	return 0;
}

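/*
 * The page holding core_restore_code may itself be a target of the image
 * copy, so the code is first moved to a safe page, covered by the identity
 * mapping, and executed from there.
 */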
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();

	return 0;
}

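/*
 * On success restore_image() does not return here: it switches to the
 * temporary page tables, copies the image pages into place and jumps into
 * the image kernel at restore_jump_address.
 */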
int swsusp_arch_resume(void)
{
	int error;

	/* We have enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();

	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

#define MD5_DIGEST_SIZE 16

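/*
 * Everything the restore path needs from the image kernel is passed in this
 * header record: the entry point, the saved CR3 and an md5 digest of the
 * firmware e820 memory map used for a consistency check on resume.
 */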
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};

#define RESTORE_MAGIC	0x23456789ABCDEF01UL

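/*
 * The digest is only generated and verified when the md5 driver is built
 * in, since resume may run before any modules can be loaded.
 */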
#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the md5 digest of a given e820 table
 *
 * @table: the e820 table to be hashed
 * @buf: buffer to store the md5 digest in
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	int size;
	int ret = 0;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return -ENOMEM;

	{
		AHASH_REQUEST_ON_STACK(req, tfm);

		size = offsetof(struct e820_table, entries) +
			sizeof(struct e820_entry) * table->nr_entries;
		ahash_request_set_tfm(req, tfm);
		sg_init_one(&sg, (u8 *)table, size);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, buf, size);

		if (crypto_ahash_digest(req))
			ret = -EINVAL;

		ahash_request_zero(req);
	}

	crypto_free_ahash(tfm);

	return ret;
}

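/*
 * The firmware-provided table is hashed (rather than the kernel's working
 * copy in e820_table) so that kernel-side modifications of the memory map
 * do not affect the comparison.
 */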
static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_table_firmware, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If the image kernel saved no digest, let the resume proceed. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_table_firmware, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) != 0;
}
#else

static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not built into the restore kernel, let the resume proceed. */
	return false;
}
#endif

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);

	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}