/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>		/* mtrr_save_fixed_ranges(), mtrr_bp_restore() */
#include <asm/xcr.h>		/* xsetbv() */
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h>	/* pcntxt_mask */

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
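/*
 * The saved_context_* words above are filled in and consumed by the
 * 32-bit hibernation assembly (swsusp_arch_suspend/swsusp_arch_resume).
 */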
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
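	/*
	 * Only the 16-bit selectors can be read with a mov here; the hidden
	 * 64-bit fs/gs base addresses live in MSRs and are saved below.
	 */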
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

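	/*
	 * The FS/GS base MSRs hold the actual 64-bit segment bases (per-cpu
	 * data, TLS); the selector values saved above do not cover them.
	 */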
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_table(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has concept of busy TSS or some
				 * similar stupidity.
				 */

#ifdef CONFIG_X86_64
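	/*
	 * The CPU marked the TSS descriptor busy when TR was last loaded,
	 * and ltr faults on a busy TSS, so flip the type back to
	 * "available" before load_TR_desc() reloads it below.
	 */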
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);

	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);
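	/*
	 * The cr3 write above also flushed the non-global TLB entries, so
	 * stale translations left by the boot kernel are gone at this point.
	 */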
	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
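	/*
	 * A plain mov to %gs would zap the hidden GS base, so gs goes
	 * through load_gs_index(); the saved base values are rewritten
	 * via their MSRs just below anyway.
	 */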
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

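	/*
	 * Loading a selector resets the hidden FS/GS bases, so the MSR
	 * writes restoring the saved bases must come after the loads above.
	 */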
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
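	/*
	 * XCR0 must be re-enabled before do_fpu_end() below: with the
	 * features still masked, xrstor of the saved extended state
	 * would fault.
	 */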
	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle race conditions
 * between the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * It is called only when user space hibernation software
		 * prepares the snapshot device during boot time, so we just
		 * call _debug_hotplug_cpu() to restore CPU0's state prior to
		 * the preparation of the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device. Hopefully that is not the case when the
		 * user is debugging CPU0 hotplug, and even if it is hit, CPU0
		 * can easily be onlined again.
		 *
		 * To keep this debug code simple, we only consider the normal
		 * boot case; otherwise we would need to remember CPU0's state,
		 * restore it afterwards, resolve races, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback runs
	 * first and disables cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);