/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
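
/*
 * Physical address of the temporary top-level page table set up for the
 * restore phase; restore_image() in hibernate_asm_64.S loads it into CR3
 * before copying the image pages into place.
 */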
unsigned long temp_level4_pgt __visible;
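
/*
 * Address of the safe page that relocate_restore_code() copies
 * core_restore_code to, so that the switch-over code keeps running while
 * the original kernel text is overwritten.
 */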
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;
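
	/*
	 * Map the page containing the image kernel's entry point as a single
	 * 2M executable large page: jump_address_phys is aligned down to a
	 * PMD boundary and restore_jump_address selects the matching entry
	 * at each page table level.
	 */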
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}
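
/*
 * Page table allocation callback for kernel_ident_mapping_init(); "safe"
 * pages are guaranteed not to collide with any of the saved image pages.
 */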
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}

static int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
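
	/*
	 * The relocated copy of core_restore_code runs from this safe page,
	 * which is covered by the temporary identity mapping, so it keeps
	 * working while the original kernel text is being overwritten with
	 * the image data.
	 */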
	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	/* Flush stale entries so the cleared NX bit takes effect. */
	__flush_tlb_all();
	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
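
/*
 * An MD5 digest of the e820 memory map saved at boot is stored in the image
 * header on suspend and compared with the digest of the current map on
 * resume, so that resuming with an inconsistent memory layout is refused.
 */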
#define MD5_DIGEST_SIZE 16

struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};
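
/* Magic number identifying this format of struct restore_data_record. */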
#define RESTORE_MAGIC	0x23456789ABCDEF01UL

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate md5 according to given e820 map
 *
 * @map: the e820 map to be calculated
 * @buf: the md5 result to be stored to
 */
static int get_e820_md5(struct e820map *map, void *buf)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	int size;
	int ret = 0;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return -ENOMEM;

	{
		AHASH_REQUEST_ON_STACK(req, tfm);
		size = offsetof(struct e820map, map)
			+ sizeof(struct e820entry) * map->nr_map;
		ahash_request_set_tfm(req, tfm);
		sg_init_one(&sg, (u8 *)map, size);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, buf, size);

		if (crypto_ahash_digest(req))
			ret = -EINVAL;
		ahash_request_zero(req);
	}
	crypto_free_ahash(tfm);

	return ret;
}

static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_saved, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If there is no digest in suspend kernel, let it go. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_saved, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not builtin for restore kernel, let it go. */
	return false;
}
#endif

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)&restore_registers;
	rdr->jump_address_phys = __pa_symbol(&restore_registers);
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}