/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/intel_rdt.h>

void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
	printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
		smp_processor_id());

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();
	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
		cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))
		return;

	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
		d0, d1, d2, d3);
	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
		d6, d7);
}
void release_thread(struct task_struct *dead_task)
{
	/* The dead task's mm must already have been dropped. */
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}
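
/*
 * A rough sketch of where release_thread() sits in task teardown: the
 * core kernel's release_task() calls it once the dead task's mm has
 * already been dropped, which is what the BUG_ON above asserts. The
 * function below is hypothetical and elides locking and RCU details.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_release_task(struct task_struct *p)
{
	/* ... architecture-independent teardown ... */
	release_thread(p);	/* per-arch cleanup: vm86 IRQs on 32-bit */
	put_task_struct(p);	/* drop the final reference */
}
#endif
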
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
	struct inactive_task_frame *frame = &fork_frame->frame;
	struct task_struct *tsk;
	int err;

	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.sp0 = (unsigned long) (childregs+1);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->di = arg;
		p->thread.io_bitmap_ptr = NULL;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)tls, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
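
/*
 * How the frame built by copy_thread_tls() is consumed: ret_from_fork
 * (entry_32.S) tests %ebx; for a kernel thread it calls the function
 * stashed in frame->bx with frame->di as the argument, while a user
 * child (frame->bx == 0) falls through to the syscall exit path with
 * childregs->ax == 0, which is why fork() returns 0 in the child. A
 * hypothetical C rendering of that assembly contract:
 */
#if 0	/* illustrative sketch of the ret_from_fork contract */
static void example_ret_from_fork(struct inactive_task_frame *frame)
{
	if (frame->bx) {
		/* kernel thread: run the payload stashed by copy_thread_tls() */
		int (*fn)(void *) = (int (*)(void *))frame->bx;
		fn((void *)frame->di);
	}
	/* otherwise: return to user mode through the child's pt_regs */
}
#endif
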
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs	= 0;
	regs->ds	= __USER_DS;
	regs->es	= __USER_DS;
	regs->ss	= __USER_DS;
	regs->cs	= __USER_CS;
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}
EXPORT_SYMBOL_GPL(start_thread);
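
/*
 * A minimal sketch of the exec path that ends here: once a binary
 * loader such as load_elf_binary() has mapped the new image, it resets
 * the saved user frame so the return to user mode lands at the new
 * entry point. The helper below is hypothetical; its name and
 * arguments are illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_finish_exec(struct pt_regs *regs,
				unsigned long entry, unsigned long user_sp)
{
	/* flat user segments, clean flags, new EIP/ESP for the fresh image */
	start_thread(regs, entry, user_sp);
}
#endif
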
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is largely a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps.
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before fpu__restore(), so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0 and cpu_current_top_of_stack. This changes
	 * current_thread_info().
	 */
	load_sp0(tss, next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common).
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_fpu, cpu);

	this_cpu_write(current_task, next_p);

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}
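
/*
 * __switch_to() is not called directly: the scheduler goes through the
 * switch_to() macro, which switches stacks in assembly (__switch_to_asm)
 * and then reaches this function, so "prev" comes back as the return
 * value on the new task's stack. A rough, hypothetical sketch of the
 * scheduler-side call site, with mm switching and bookkeeping elided:
 */
#if 0	/* illustrative sketch of the scheduler-side contract */
static struct rq *example_context_switch(struct task_struct *prev,
					 struct task_struct *next)
{
	/* ... switch_mm() and friends ... */
	switch_to(prev, next, prev);		/* stack swap, then __switch_to() */
	return finish_task_switch(prev);	/* runs in the new task's context */
}
#endif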