/*  linux/arch/sparc/kernel/process.c
 *
 *  Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
 *  Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <asm/auxio.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
#include <asm/elf.h>
#include <asm/prom.h>
#include <asm/unistd.h>
#include <asm/setup.h>

/*
 * Power management idle function
 * Set in pm platform drivers (apc.c and pmc.c)
 */
void (*sparc_idle)(void);

/*
 * Power-off handler instantiation for pm.h compliance
 * This is done via auxio, but could be used as a fallback
 * handler when auxio is not present -- unused for now...
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

/*
 * sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off()
 */
int scons_pwroff = 1;

extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);

struct task_struct *last_task_used_math = NULL;
struct thread_info *current_set[NR_CPUS];

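/*
 * Lazy-FPU bookkeeping: on UP, last_task_used_math points to the task
 * whose state is still live in the FPU; on SMP each task carries
 * TIF_USEDFPU instead.  exit_thread(), flush_thread(), copy_thread()
 * and dump_fpu() below all fpsave() the live state before touching it.
 */
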
/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	for (;;) {
		while (!need_resched()) {
			if (sparc_idle)
				(*sparc_idle)();
			else
				cpu_relax();
		}
		schedule_preempt_disabled();
	}
}

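/*
 * Note on cpu_idle() above: TIF_POLLING_NRFLAG advertises that this
 * CPU polls need_resched() while idle, so a remote wakeup can skip the
 * reschedule IPI and simply set the flag.
 */
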
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
void machine_halt(void)
{
	local_irq_enable();
	mdelay(8);
	local_irq_disable();
	prom_halt();
	panic("Halt failed!");
}

void machine_restart(char *cmd)
{
	char *p;

	local_irq_enable();
	mdelay(8);
	local_irq_disable();

	p = strchr(reboot_command, '\n');
	if (p)
		*p = 0;
	if (cmd)
		prom_reboot(cmd);
	if (*reboot_command)
		prom_reboot(reboot_command);
	prom_feval("reset");
	panic("Reboot failed!");
}

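/*
 * The guard below reads: poke the auxio power register only when the
 * firmware console is not a serial line (strcmp() != 0) or when the
 * scons_pwroff sysctl explicitly allows powering off serial-console
 * machines; otherwise we merely halt so the console stays reachable.
 */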
void machine_power_off(void)
{
	if (auxio_power_register &&
	    (strcmp(of_console_device->type, "serial") || scons_pwroff))
		*auxio_power_register |= AUXIO_POWER_OFF;
	machine_halt();
}

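/*
 * show_regs() layout: u_regs[0..7] hold the globals (%g0-%g7) and
 * u_regs[8..15] the outs (%o0-%o7).  %o6 (u_regs[14]) is the stack
 * pointer, so the locals and ins of the trapping window are read from
 * the reg_window32 saved there; %o7 (u_regs[15]) is the return
 * address, printed as RPC.
 */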
void show_regs(struct pt_regs *r)
{
	struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14];

	printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx    %s\n",
	       r->psr, r->pc, r->npc, r->y, print_tainted());
	printk("PC: <%pS>\n", (void *) r->pc);
	printk("%%G: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
	       r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
	printk("%%O: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
	       r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) r->u_regs[15]);

	printk("%%L: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
	       rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
	printk("%%I: %08lx %08lx  %08lx %08lx  %08lx %08lx  %08lx %08lx\n",
	       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
	       rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}

/*
 * show_stack() is an external API which we do not use ourselves.
 * The oops is printed in die_if_kernel.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp;
	unsigned long task_base;
	struct reg_window32 *rw;
	int count = 0;

	if (tsk != NULL)
		task_base = (unsigned long) task_stack_page(tsk);
	else
		task_base = (unsigned long) current_thread_info();

	fp = (unsigned long) _ksp;
	do {
		/* Bogus frame pointer? */
		if (fp < (task_base + sizeof(struct thread_info)) ||
		    fp >= (task_base + (PAGE_SIZE << 1)))
			break;
		rw = (struct reg_window32 *) fp;
		pc = rw->ins[7];
		printk("[%08lx : ", pc);
		printk("%pS ] ", (void *) pc);
		fp = rw->ins[6];
	} while (++count < 16);
	printk("\n");
}

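/*
 * Each frame above is printed as "[<pc> : symbol+0xoff/0xsize ]" via
 * %pS; the walk stops after 16 frames, or as soon as a saved frame
 * pointer falls outside the task's two-page kernel stack.
 */
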
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);

/*
 * Note: sparc64 has a pretty intricate thread_saved_pc, check it out.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return task_thread_info(tsk)->kpc;
}

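/*
 * ti->kpc holds the kernel %pc saved when the task last switched out
 * (seeded by copy_thread() below), so for a sleeping task it is the
 * point of resumption.
 */
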
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
	if (last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		/* Keep process from leaving FPU in a bogon state. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		clear_thread_flag(TIF_USEDFPU);
#endif
	}
}

void flush_thread(void)
{
	current_thread_info()->w_saved = 0;

#ifndef CONFIG_SMP
	if (last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		/* Clean the fpu. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
		last_task_used_math = NULL;
#else
		clear_thread_flag(TIF_USEDFPU);
#endif
	}

	/* This task is no longer a kernel thread. */
	if (current->thread.flags & SPARC_FLAG_KTHREAD) {
		current->thread.flags &= ~SPARC_FLAG_KTHREAD;

		/* We must fixup kregs as well. */
		/* XXX This was not fixed for ti for a while, worked. Unused? */
		current->thread.kregs = (struct pt_regs *)
		    (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
	}
}

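/*
 * clone_stackframe() copies the parent's topmost user stack frame to
 * the stack supplied to clone(): src->fp bounds the frame, the same
 * number of bytes is carved out below dst, and the copy's fp is
 * rewired to dst so the child's window fills walk a sane chain.
 * Returns the relocated frame, or NULL if the user access faults.
 */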
static inline struct sparc_stackf __user *
clone_stackframe(struct sparc_stackf __user *dst,
		 struct sparc_stackf __user *src)
{
	unsigned long size, fp;
	struct sparc_stackf *tmp;
	struct sparc_stackf __user *sp;

	if (get_user(tmp, &src->fp))
		return NULL;

	fp = (unsigned long) tmp;
	size = (fp - ((unsigned long) src));
	fp = (unsigned long) dst;
	sp = (struct sparc_stackf __user *)(fp - size);

	/* do_fork() grabs the parent semaphore, we must release it
	 * temporarily so we can build the child clone stack frame
	 * without deadlocking.
	 */
	if (__copy_user(sp, src, size))
		sp = NULL;
	else if (put_user(fp, &sp->fp))
		sp = NULL;

	return sp;
}

asmlinkage int sparc_do_fork(unsigned long clone_flags,
			     unsigned long stack_start,
			     struct pt_regs *regs,
			     unsigned long stack_size)
{
	unsigned long parent_tid_ptr, child_tid_ptr;
	unsigned long orig_i1 = regs->u_regs[UREG_I1];
	long ret;

	parent_tid_ptr = regs->u_regs[UREG_I2];
	child_tid_ptr = regs->u_regs[UREG_I4];

	ret = do_fork(clone_flags, stack_start, stack_size,
		      (int __user *) parent_tid_ptr,
		      (int __user *) child_tid_ptr);

	/* If we get an error and potentially restart the system
	 * call, we're screwed because copy_thread() clobbered
	 * the parent's %o1.  So detect that case and restore it
	 * here.
	 */
	if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
		regs->u_regs[UREG_I1] = orig_i1;

	return ret;
}

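/*
 * The unsigned comparison above catches every return value in the
 * [-ERESTART_RESTARTBLOCK, -1] range: errors and restartable calls
 * alike get the parent's original %o1 restored.
 */
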
/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 *   Parent -->  %o0 == child's  pid, %o1 == 0
 *   Child  -->  %o0 == parent's pid, %o1 == 1
 *
 * NOTE: We have a separate fork kpsr/kwim because
 *       the parent could change these values between
 *       sys_fork invocation and when we reach here
 *       if the parent should sleep while trying to
 *       allocate the task_struct and kernel stack in
 *       do_fork().
 * XXX See comment above sys_vfork in sparc64. todo.
 */

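/*
 * For illustration only (not part of this file): a user-space fork()
 * stub folds the SunOS two-register convention back into POSIX
 * roughly as
 *
 *	pid = o0;
 *	if (o1 != 0)		o1 set means "I am the child"
 *		pid = 0;
 *	return pid;
 */
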
extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	char *new_stack;

#ifndef CONFIG_SMP
	if (last_task_used_math == current) {
#else
	if (test_thread_flag(TIF_USEDFPU)) {
#endif
		put_psr(get_psr() | PSR_EF);
		fpsave(&p->thread.float_regs[0], &p->thread.fsr,
		       &p->thread.fpqueue[0], &p->thread.fpqdepth);
	}

	/*
	 *  p->thread_info         new_stack    childregs     stack bottom
	 *  !                      !            !             !
	 *  V                      V (stk.fr.)  V  (pt_regs)  V
	 *  +----- - - - - - ------+============+=============+
	 */
	new_stack = task_stack_page(p) + THREAD_SIZE;
	new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);

	/*
	 * A new process must start with interrupts disabled, because
	 * the scheduler assumes it (see schedule_tail and
	 * finish_arch_switch). If we do not do it, a timer interrupt hits
	 * before we unlock, attempts to re-take the rq->lock, and then we die.
	 * Thus, kpsr |= PSR_PIL.
	 */
	ti->ksp = (unsigned long) new_stack;
	p->thread.kregs = childregs;

	if (unlikely(p->flags & PF_KTHREAD)) {
		extern int nwindows;
		unsigned long psr;
		memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
		p->thread.flags |= SPARC_FLAG_KTHREAD;
		p->thread.current_ds = KERNEL_DS;
		ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
		childregs->u_regs[UREG_G1] = sp; /* function */
		childregs->u_regs[UREG_G2] = arg;
		psr = childregs->psr = get_psr();
		ti->kpsr = psr | PSR_PIL;
		ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows);
		return 0;
	}
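	/*
	 * Note on kpc/kwim above: kpc points 8 bytes before the handler
	 * because the switch code resumes with the usual "+8" return
	 * convention; kwim leaves a single window-invalid bit set on
	 * the window adjacent to the current one (CWP + 1 mod
	 * nwindows), i.e. the new context starts with exactly one
	 * valid register window.
	 */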
	memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
	childregs->u_regs[UREG_FP] = sp;
	p->thread.flags &= ~SPARC_FLAG_KTHREAD;
	p->thread.current_ds = USER_DS;
	ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
	ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
	ti->kwim = current->thread.fork_kwim;

	if (sp != regs->u_regs[UREG_FP]) {
		struct sparc_stackf __user *childstack;
		struct sparc_stackf __user *parentstack;

		/*
		 * This is a clone() call with supplied user stack.
		 * Set some valid stack frames to give to the child.
		 */
		childstack = (struct sparc_stackf __user *)
			(sp & ~0xfUL);
		parentstack = (struct sparc_stackf __user *)
			regs->u_regs[UREG_FP];

#if 0
		printk("clone: parent stack:\n");
		show_stackframe(parentstack);
#endif

		childstack = clone_stackframe(childstack, parentstack);
		if (!childstack)
			return -EFAULT;

#if 0
		printk("clone: child stack:\n");
		show_stackframe(childstack);
#endif

		childregs->u_regs[UREG_FP] = (unsigned long)childstack;
	}

#ifdef CONFIG_SMP
	/* FPU must be disabled on SMP. */
	childregs->psr &= ~PSR_EF;
	clear_tsk_thread_flag(p, TIF_USEDFPU);
#endif

	/* Set the return value for the child. */
	childregs->u_regs[UREG_I0] = current->pid;
	childregs->u_regs[UREG_I1] = 1;

	/* Set the return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}

428 * fill in the fpu structure for a core dump.
430 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
433 memset(fpregs, 0, sizeof(*fpregs));
434 fpregs->pr_q_entrysize = 8;
438 if (test_thread_flag(TIF_USEDFPU)) {
439 put_psr(get_psr() | PSR_EF);
440 fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr,
441 ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth);
443 regs->psr &= ~(PSR_EF);
444 clear_thread_flag(TIF_USEDFPU);
448 if (current == last_task_used_math) {
449 put_psr(get_psr() | PSR_EF);
450 fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr,
451 ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth);
453 regs->psr &= ~(PSR_EF);
454 last_task_used_math = NULL;
458 memcpy(&fpregs->pr_fr.pr_regs[0],
459 ¤t->thread.float_regs[0],
460 (sizeof(unsigned long) * 32));
461 fpregs->pr_fsr = current->thread.fsr;
462 fpregs->pr_qcnt = current->thread.fpqdepth;
463 fpregs->pr_q_entrysize = 8;
465 if(fpregs->pr_qcnt != 0) {
466 memcpy(&fpregs->pr_q[0],
467 ¤t->thread.fpqueue[0],
468 sizeof(struct fpq) * fpregs->pr_qcnt);
470 /* Zero out the rest. */
471 memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
472 sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
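/*
 * get_wchan() backs /proc/<pid>/wchan: walk the sleeping task's saved
 * register windows and report the first return address that is not
 * inside the scheduler itself.
 */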
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	unsigned long task_base = (unsigned long) task;
	unsigned long ret = 0;
	struct reg_window32 *rw;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	fp = task_thread_info(task)->ksp + bias;
	do {
		/* Bogus frame pointer? */
		if (fp < (task_base + sizeof(struct thread_info)) ||
		    fp >= (task_base + (2 * PAGE_SIZE)))
			break;
		rw = (struct reg_window32 *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);

out:
	return ret;
}