Merge branch 'linus' into tracing/hw-breakpoints
similarity index 69%
rename from arch/x86/power/cpu_64.c
rename to arch/x86/power/cpu.c
index 46866a13a93acac5e4db22228855f93b6c975caf..394cbb88987cf32d8d0f962dceaf9386c3361821 100644
@@ -1,5 +1,5 @@
 /*
- * Suspend and hibernation support for x86-64
+ * Suspend support specific for i386/x86-64.
  *
  * Distribute under GPLv2
  *
@@ -8,19 +8,29 @@
  * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
  */
 
-#include <linux/smp.h>
 #include <linux/suspend.h>
-#include <asm/proto.h>
-#include <asm/page.h>
+#include <linux/smp.h>
+
 #include <asm/pgtable.h>
+#include <asm/proto.h>
 #include <asm/mtrr.h>
+#include <asm/page.h>
+#include <asm/mce.h>
 #include <asm/xcr.h>
 #include <asm/suspend.h>
 #include <asm/debugreg.h>
 
-static void fix_processor_context(void);
+#ifdef CONFIG_X86_32
+static struct saved_context saved_context;
 
+unsigned long saved_context_ebx;
+unsigned long saved_context_esp, saved_context_ebp;
+unsigned long saved_context_esi, saved_context_edi;
+unsigned long saved_context_eflags;
+#else
+/* CONFIG_X86_64 */
 struct saved_context saved_context;
+#endif
 
 /**
  *     __save_processor_state - save CPU registers before creating a
@@ -39,19 +49,35 @@ struct saved_context saved_context;
  */
 static void __save_processor_state(struct saved_context *ctxt)
 {
+#ifdef CONFIG_X86_32
+       mtrr_save_fixed_ranges(NULL);
+#endif
        kernel_fpu_begin();
 
        /*
         * descriptor tables
         */
+#ifdef CONFIG_X86_32
+       store_gdt(&ctxt->gdt);
+       store_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
        store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
        store_idt((struct desc_ptr *)&ctxt->idt_limit);
+#endif
        store_tr(ctxt->tr);
 
        /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
        /*
         * segment registers
         */
+#ifdef CONFIG_X86_32
+       savesegment(es, ctxt->es);
+       savesegment(fs, ctxt->fs);
+       savesegment(gs, ctxt->gs);
+       savesegment(ss, ctxt->ss);
+#else
+/* CONFIG_X86_64 */
        asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
        asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
        asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
@@ -63,31 +89,68 @@ static void __save_processor_state(struct saved_context *ctxt)
        rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
        mtrr_save_fixed_ranges(NULL);
 
+       rdmsrl(MSR_EFER, ctxt->efer);
+#endif
+
        /*
         * control registers
         */
-       rdmsrl(MSR_EFER, ctxt->efer);
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
+#ifdef CONFIG_X86_32
+       ctxt->cr4 = read_cr4_safe();
+#else
+/* CONFIG_X86_64 */
        ctxt->cr4 = read_cr4();
        ctxt->cr8 = read_cr8();
+#endif
        hw_breakpoint_disable();
 }
 
+/* Needed by apm.c */
 void save_processor_state(void)
 {
        __save_processor_state(&saved_context);
 }
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(save_processor_state);
+#endif
 
 static void do_fpu_end(void)
 {
        /*
-        * Restore FPU regs if necessary
+        * Restore FPU regs if necessary.
         */
        kernel_fpu_end();
 }
 
+static void fix_processor_context(void)
+{
+       int cpu = smp_processor_id();
+       struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+       set_tss_desc(cpu, t);   /*
+                                * This just modifies memory; should not be
+                                * necessary. But... This is necessary, because
+                                * 386 hardware has concept of busy TSS or some
+                                * similar stupidity.
+                                */
+
+#ifdef CONFIG_X86_64
+       get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
+
+       syscall_init();                         /* This sets MSR_*STAR and related */
+#endif
+       load_TR_desc();                         /* This does ltr */
+       load_LDT(&current->active_mm->context); /* This does lldt */
+
+       /*
+        * Now maybe reload the debug registers
+        */
+       load_debug_registers();
+}
+
 /**
  *     __restore_processor_state - restore the contents of CPU registers saved
  *             by __save_processor_state()
@@ -98,9 +161,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
        /*
         * control registers
         */
+       /* cr4 was introduced in the Pentium CPU */
+#ifdef CONFIG_X86_32
+       if (ctxt->cr4)
+               write_cr4(ctxt->cr4);
+#else
+/* CONFIG_X86_64 */
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
        write_cr4(ctxt->cr4);
+#endif
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
        write_cr0(ctxt->cr0);
@@ -109,13 +179,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
         * now restore the descriptor tables to their proper values
         * ltr is done in fix_processor_context().
         */
+#ifdef CONFIG_X86_32
+       load_gdt(&ctxt->gdt);
+       load_idt(&ctxt->idt);
+#else
+/* CONFIG_X86_64 */
        load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);
-
+#endif
 
        /*
         * segment registers
         */
+#ifdef CONFIG_X86_32
+       loadsegment(es, ctxt->es);
+       loadsegment(fs, ctxt->fs);
+       loadsegment(gs, ctxt->gs);
+       loadsegment(ss, ctxt->ss);
+
+       /*
+        * sysenter MSRs
+        */
+       if (boot_cpu_has(X86_FEATURE_SEP))
+               enable_sep_cpu();
+#else
+/* CONFIG_X86_64 */
        asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
        asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
        asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
@@ -125,6 +213,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
        wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+#endif
 
        /*
         * restore XCR0 for xsave capable cpu's.
@@ -136,33 +225,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
 
        do_fpu_end();
        mtrr_ap_init();
+
+#ifdef CONFIG_X86_32
+       mcheck_init(&boot_cpu_data);
+#endif
 }
 
+/* Needed by apm.c */
 void restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
 }
-
-static void fix_processor_context(void)
-{
-       int cpu = smp_processor_id();
-       struct tss_struct *t = &per_cpu(init_tss, cpu);
-
-       /*
-        * This just modifies memory; should not be necessary. But... This
-        * is necessary, because 386 hardware has concept of busy TSS or some
-        * similar stupidity.
-        */
-       set_tss_desc(cpu, t);
-
-       get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
-
-       syscall_init();                         /* This sets MSR_*STAR and related */
-       load_TR_desc();                         /* This does ltr */
-       load_LDT(&current->active_mm->context); /* This does lldt */
-
-       /*
-        * Now maybe reload the debug registers
-        */
-       load_debug_registers();
-}
+#ifdef CONFIG_X86_32
+EXPORT_SYMBOL(restore_processor_state);
+#endif
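
For context, the two entry points exported here are meant to bracket whatever firmware request actually puts the machine to sleep. Below is a minimal sketch of such a caller; it is illustrative only, not taken from apm.c, and enter_firmware_sleep() is a hypothetical stand-in for the real BIOS/ACPI call.

#include <linux/irqflags.h>
#include <linux/suspend.h>	/* save_processor_state(), restore_processor_state() */

int enter_firmware_sleep(void);		/* hypothetical platform hook */

static int example_platform_suspend(void)
{
	int err;

	local_irq_disable();
	save_processor_state();		/* snapshot descriptor tables, segments, CRs, MSRs */

	err = enter_firmware_sleep();	/* hypothetical firmware call; CPU register
					 * state is stale once it returns */

	restore_processor_state();	/* reload GDT/IDT/TR, segments, CR0..CR4, MSRs */
	local_irq_enable();

	return err;
}

The EXPORT_SYMBOL() pair is added only under CONFIG_X86_32 because apm.c can be built as a module; the 64-bit users of these routines (ACPI sleep, hibernation) are built into the kernel, so no export is needed there.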