/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * CPU hotplug support - ashok.raj@intel.com
 *
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
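
/*
 * Example usage (an illustrative sketch, not part of this file): a module
 * that wants to know when this CPU enters and leaves the idle loop can
 * register a callback on the chain above.  The hypothetical
 * my_idle_notify() below just distinguishes the two events that
 * enter_idle()/__exit_idle() pass down the chain.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case IDLE_START:
 *			// CPU is about to idle: e.g. quiesce per-cpu work
 *			break;
 *		case IDLE_END:
 *			// CPU left idle: resume normal activity
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	// module init:  idle_notifier_register(&my_idle_nb);
 *	// module exit:  idle_notifier_unregister(&my_idle_nb);
 */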

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifndef CONFIG_HOTPLUG_CPU
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_idle_enter();
		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
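
/*
 * If cpuidle_idle_call() fails (returns nonzero), the loop above falls
 * back to the pm_idle function pointer.  A minimal sketch of such an
 * idle routine (hypothetical name; the real default_idle() also handles
 * polling and tracing) might look like:
 *
 *	static void example_hlt_idle(void)
 *	{
 *		// Interrupts are disabled here by the idle loop;
 *		// safe_halt() issues "sti; hlt", re-enabling interrupts
 *		// and halting in one step so no wakeup can be lost
 *		// between the check of need_resched() and the halt.
 *		safe_halt();
 *	}
 */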

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm && dead_task->mm->context.size) {
		printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
		BUG();
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	current->thread.usersp	= new_sp;
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	switch_fpu_finish(next_p, fpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		     (unsigned long)task_stack_page(next_p) +
		     THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	return next_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
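
/*
 * User-space usage (an illustrative sketch, not kernel code): glibc
 * provides no wrapper for arch_prctl, so a 64-bit program invokes it
 * via syscall(2).  ARCH_GET_FS/ARCH_SET_GS come from <asm/prctl.h>;
 * "base" is a hypothetical variable.
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);   // read %fs base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000);  // set %gs base
 *
 * Note that GET takes a pointer in "addr" while SET takes the new base
 * value itself, matching the switch cases in do_arch_prctl() above.
 */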

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}