/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
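
/*
 * Usage sketch (not part of this file; my_idle_cb/my_idle_nb are made-up
 * names): a driver that wants to quiesce around idle can hook the chain:
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			;	// CPU is about to go idle
 *		else		// IDLE_END
 *			;	// CPU left idle
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 *
 * The callbacks run from enter_idle()/__exit_idle() below, so they must
 * not sleep (this is an atomic notifier chain).
 */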

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}
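
/*
 * Helpers for the per-thread GDT TLS slots. do_arch_prctl() below uses
 * them for its "small base" fast path: an FS/GS base below 4GB can be
 * expressed as a 32-bit segment descriptor (FS_TLS/GS_TLS slot) instead
 * of going through the FS/GS base MSRs, which is cheaper to switch.
 */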

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr	= addr,
		.limit		= 0xfffff,
		.seg_32bit	= 1,
		.limit_in_pages = 1,
		.useable	= 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
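
/*
 * start_thread_common() and the start_thread()/start_thread_ia32()
 * wrappers below are called at exec time (from the binfmt loaders) to
 * reset the user-visible CPU state: flat data segments, fresh code and
 * stack pointers, and EFLAGS with only IF set.
 */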

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */

__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	__unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}
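
/*
 * Note on the preload_fpu heuristic used above: fpu_counter counts
 * consecutive timeslices in which the task used the FPU. Once it
 * exceeds 5, the switch path restores the math state eagerly (clts() +
 * __math_state_restore()) instead of taking a device-not-available
 * trap on the task's first FPU use after the switch.
 */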

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}
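
/*
 * get_wchan() reports where a blocked task is sleeping. It walks the
 * task's saved frame pointers (the previous %rbp is at *fp, the return
 * address at fp+8) and returns the first return address outside the
 * scheduler; this relies on the kernel being built with frame pointers.
 */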

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
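
/*
 * Userspace sketch (assumes the raw syscall(2) wrapper; tls_area is a
 * made-up variable): setting and reading back the FS base from a 64-bit
 * process looks like
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_area);
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * Note the asymmetry in do_arch_prctl(): for ARCH_SET_* 'addr' is the
 * new base itself, for ARCH_GET_* it is a user pointer the base is
 * written through.
 */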

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}