/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif
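
/*
 * On 32-bit, the bare saved_context_* variables are referenced by name
 * from the assembly resume code, which is why they live outside of the
 * saved_context structure.
 */
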
/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, the
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
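
	/*
	 * On 64-bit the FS and GS base addresses live in MSRs rather than
	 * in the selectors saved above; MSR_KERNEL_GS_BASE is the base
	 * that swapgs exchanges in on kernel entry, so all three must be
	 * saved explicitly.
	 */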
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
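
	/*
	 * MSR_IA32_MISC_ENABLE is not implemented on every CPU, so it is
	 * read with rdmsrl_safe(); misc_enable_saved records whether a
	 * valid value was actually captured for the restore path to replay.
	 */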
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has the concept of a busy TSS
				 * or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
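	/*
	 * ltr marks the TSS descriptor busy (type 11), and loading TR
	 * with an already-busy TSS raises #GP, so reset the type to
	 * "available TSS" (9) before load_TR_desc() reloads it below.
	 */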
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
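	/*
	 * A plain mov to %gs would reset the hidden GS base, so %gs is
	 * restored via load_gs_index() (which protects the kernel GS base
	 * with swapgs) and the FS/GS base MSRs are rewritten afterwards.
	 */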
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
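	/*
	 * The PEBS/BTS debug store MSRs are not preserved across suspend,
	 * so rewrite them before the perf hardware can trace again.
	 */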
	perf_restore_debug_store();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is already disabled, so it is unnecessary to handle the race between
 * the cpumask query and cpu hotplug.
 */
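
/*
 * Firmware resumes the machine on the boot processor, so suspend and
 * hibernation have to be refused while CPU0 is offline.
 */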
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation
		 * software prepares the snapshot device during boot time,
		 * so we just call _debug_hotplug_cpu() to restore CPU0's
		 * state prior to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0
		 * and the user may see a different CPU0 state before and
		 * after accessing the snapshot device. But hopefully that
		 * is not the case when a user is debugging CPU0 hotplug,
		 * and even users who hit it can easily online CPU0 again.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case. Otherwise we would need to remember CPU0's
		 * state, restore to that state and resolve the races etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
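	/* A negative errno becomes NOTIFY_BAD and vetoes the PM transition. */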
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the BSP online
	 * check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);