1 /*
2  *  Derived from "arch/i386/kernel/process.c"
3  *    Copyright (C) 1995  Linus Torvalds
4  *
5  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6  *  Paul Mackerras (paulus@cs.anu.edu.au)
7  *
8  *  PowerPC version
9  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10  *
11  *  This program is free software; you can redistribute it and/or
12  *  modify it under the terms of the GNU General Public License
13  *  as published by the Free Software Foundation; either version
14  *  2 of the License, or (at your option) any later version.
15  */
16
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/stddef.h>
23 #include <linux/unistd.h>
24 #include <linux/ptrace.h>
25 #include <linux/slab.h>
26 #include <linux/user.h>
27 #include <linux/elf.h>
28 #include <linux/init.h>
29 #include <linux/prctl.h>
30 #include <linux/init_task.h>
31 #include <linux/export.h>
32 #include <linux/kallsyms.h>
33 #include <linux/mqueue.h>
34 #include <linux/hardirq.h>
35 #include <linux/utsname.h>
36 #include <linux/ftrace.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/personality.h>
39 #include <linux/random.h>
40 #include <linux/hw_breakpoint.h>
41
42 #include <asm/pgtable.h>
43 #include <asm/uaccess.h>
44 #include <asm/io.h>
45 #include <asm/processor.h>
46 #include <asm/mmu.h>
47 #include <asm/prom.h>
48 #include <asm/machdep.h>
49 #include <asm/time.h>
50 #include <asm/runlatch.h>
51 #include <asm/syscalls.h>
52 #include <asm/switch_to.h>
53 #include <asm/debug.h>
54 #ifdef CONFIG_PPC64
55 #include <asm/firmware.h>
56 #endif
57 #include <linux/kprobes.h>
58 #include <linux/kdebug.h>
59
60 extern unsigned long _get_SP(void);
61
62 #ifndef CONFIG_SMP
63 struct task_struct *last_task_used_math = NULL;
64 struct task_struct *last_task_used_altivec = NULL;
65 struct task_struct *last_task_used_vsx = NULL;
66 struct task_struct *last_task_used_spe = NULL;
67 #endif
68
69 /*
70  * Make sure the floating-point register state in the
71  * thread_struct is up to date for task tsk.
72  */
73 void flush_fp_to_thread(struct task_struct *tsk)
74 {
75         if (tsk->thread.regs) {
76                 /*
77                  * We need to disable preemption here because if we didn't,
78                  * another process could get scheduled after the regs->msr
79                  * test but before we have finished saving the FP registers
80                  * to the thread_struct.  That process could take over the
81                  * FPU, and then when we get scheduled again we would store
82                  * bogus values for the remaining FP registers.
83                  */
84                 preempt_disable();
85                 if (tsk->thread.regs->msr & MSR_FP) {
86 #ifdef CONFIG_SMP
87                         /*
88                          * This should only ever be called for current or
89                          * for a stopped child process.  Since we save away
90                          * the FP register state on context switch on SMP,
91                          * there is something wrong if a stopped child appears
92                          * to still have its FP state in the CPU registers.
93                          */
94                         BUG_ON(tsk != current);
95 #endif
96                         giveup_fpu(tsk);
97                 }
98                 preempt_enable();
99         }
100 }
101 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
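/*
 * Illustrative sketch (not part of this file): the ptrace code is a
 * typical caller -- it flushes live FP state into the thread_struct
 * before copying it out for a debugger, roughly:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
 */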
102
103 void enable_kernel_fp(void)
104 {
105         WARN_ON(preemptible());
106
107 #ifdef CONFIG_SMP
108         if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
109                 giveup_fpu(current);
110         else
111                 giveup_fpu(NULL);       /* just enables FP for kernel */
112 #else
113         giveup_fpu(last_task_used_math);
114 #endif /* CONFIG_SMP */
115 }
116 EXPORT_SYMBOL(enable_kernel_fp);
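/*
 * Sketch of the expected calling pattern: kernel code that wants to use
 * the FPU must keep preemption off for the duration (the
 * WARN_ON(preemptible()) above enforces this), e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... FP instructions ...
 *	preempt_enable();
 */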
117
118 #ifdef CONFIG_ALTIVEC
119 void enable_kernel_altivec(void)
120 {
121         WARN_ON(preemptible());
122
123 #ifdef CONFIG_SMP
124         if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
125                 giveup_altivec(current);
126         else
127                 giveup_altivec_notask();
128 #else
129         giveup_altivec(last_task_used_altivec);
130 #endif /* CONFIG_SMP */
131 }
132 EXPORT_SYMBOL(enable_kernel_altivec);
133
134 /*
135  * Make sure the VMX/Altivec register state in the
136  * thread_struct is up to date for task tsk.
137  */
138 void flush_altivec_to_thread(struct task_struct *tsk)
139 {
140         if (tsk->thread.regs) {
141                 preempt_disable();
142                 if (tsk->thread.regs->msr & MSR_VEC) {
143 #ifdef CONFIG_SMP
144                         BUG_ON(tsk != current);
145 #endif
146                         giveup_altivec(tsk);
147                 }
148                 preempt_enable();
149         }
150 }
151 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
152 #endif /* CONFIG_ALTIVEC */
153
154 #ifdef CONFIG_VSX
155 #if 0
156 /* not currently used, but some crazy RAID module might want to later */
157 void enable_kernel_vsx(void)
158 {
159         WARN_ON(preemptible());
160
161 #ifdef CONFIG_SMP
162         if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
163                 giveup_vsx(current);
164         else
165                 giveup_vsx(NULL);       /* just enable vsx for kernel - force */
166 #else
167         giveup_vsx(last_task_used_vsx);
168 #endif /* CONFIG_SMP */
169 }
170 EXPORT_SYMBOL(enable_kernel_vsx);
171 #endif
172
173 void giveup_vsx(struct task_struct *tsk)
174 {
175         giveup_fpu(tsk);
176         giveup_altivec(tsk);
177         __giveup_vsx(tsk);
178 }
179
180 void flush_vsx_to_thread(struct task_struct *tsk)
181 {
182         if (tsk->thread.regs) {
183                 preempt_disable();
184                 if (tsk->thread.regs->msr & MSR_VSX) {
185 #ifdef CONFIG_SMP
186                         BUG_ON(tsk != current);
187 #endif
188                         giveup_vsx(tsk);
189                 }
190                 preempt_enable();
191         }
192 }
193 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
194 #endif /* CONFIG_VSX */
195
196 #ifdef CONFIG_SPE
197
198 void enable_kernel_spe(void)
199 {
200         WARN_ON(preemptible());
201
202 #ifdef CONFIG_SMP
203         if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
204                 giveup_spe(current);
205         else
206                 giveup_spe(NULL);       /* just enable SPE for kernel - force */
207 #else
208         giveup_spe(last_task_used_spe);
209 #endif /* CONFIG_SMP */
210 }
211 EXPORT_SYMBOL(enable_kernel_spe);
212
213 void flush_spe_to_thread(struct task_struct *tsk)
214 {
215         if (tsk->thread.regs) {
216                 preempt_disable();
217                 if (tsk->thread.regs->msr & MSR_SPE) {
218 #ifdef CONFIG_SMP
219                         BUG_ON(tsk != current);
220 #endif
221                         tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
222                         giveup_spe(tsk);
223                 }
224                 preempt_enable();
225         }
226 }
227 #endif /* CONFIG_SPE */
228
229 #ifndef CONFIG_SMP
230 /*
231  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
232  * and the current task has some state, discard it.
233  */
234 void discard_lazy_cpu_state(void)
235 {
236         preempt_disable();
237         if (last_task_used_math == current)
238                 last_task_used_math = NULL;
239 #ifdef CONFIG_ALTIVEC
240         if (last_task_used_altivec == current)
241                 last_task_used_altivec = NULL;
242 #endif /* CONFIG_ALTIVEC */
243 #ifdef CONFIG_VSX
244         if (last_task_used_vsx == current)
245                 last_task_used_vsx = NULL;
246 #endif /* CONFIG_VSX */
247 #ifdef CONFIG_SPE
248         if (last_task_used_spe == current)
249                 last_task_used_spe = NULL;
250 #endif
251         preempt_enable();
252 }
253 #endif /* CONFIG_SMP */
254
255 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
256 void do_send_trap(struct pt_regs *regs, unsigned long address,
257                   unsigned long error_code, int signal_code, int breakpt)
258 {
259         siginfo_t info;
260
261         current->thread.trap_nr = signal_code;
262         if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
263                         11, SIGSEGV) == NOTIFY_STOP)
264                 return;
265
266         /* Deliver the signal to userspace */
267         info.si_signo = SIGTRAP;
268         info.si_errno = breakpt;        /* breakpoint or watchpoint id */
269         info.si_code = signal_code;
270         info.si_addr = (void __user *)address;
271         force_sig_info(SIGTRAP, &info, current);
272 }
273 #else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
274 void do_dabr(struct pt_regs *regs, unsigned long address,
275                     unsigned long error_code)
276 {
277         siginfo_t info;
278
279         current->thread.trap_nr = TRAP_HWBKPT;
280         if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
281                         11, SIGSEGV) == NOTIFY_STOP)
282                 return;
283
284         if (debugger_dabr_match(regs))
285                 return;
286
287         /* Clear the DABR */
288         set_dabr(0, 0);
289
290         /* Deliver the signal to userspace */
291         info.si_signo = SIGTRAP;
292         info.si_errno = 0;
293         info.si_code = TRAP_HWBKPT;
294         info.si_addr = (void __user *)address;
295         force_sig_info(SIGTRAP, &info, current);
296 }
297 #endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
298
299 static DEFINE_PER_CPU(unsigned long, current_dabr);
300
301 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
302 /*
303  * Set the debug registers back to their default "safe" values.
304  */
305 static void set_debug_reg_defaults(struct thread_struct *thread)
306 {
307         thread->iac1 = thread->iac2 = 0;
308 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
309         thread->iac3 = thread->iac4 = 0;
310 #endif
311         thread->dac1 = thread->dac2 = 0;
312 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
313         thread->dvc1 = thread->dvc2 = 0;
314 #endif
315         thread->dbcr0 = 0;
316 #ifdef CONFIG_BOOKE
317         /*
318          * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
319          */
320         thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |   \
321                         DBCR1_IAC3US | DBCR1_IAC4US;
322         /*
323          * Force Data Address Compare User/Supervisor bits to be User-only
324          * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
325          */
326         thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
327 #else
328         thread->dbcr1 = 0;
329 #endif
330 }
331
332 static void prime_debug_regs(struct thread_struct *thread)
333 {
334         mtspr(SPRN_IAC1, thread->iac1);
335         mtspr(SPRN_IAC2, thread->iac2);
336 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
337         mtspr(SPRN_IAC3, thread->iac3);
338         mtspr(SPRN_IAC4, thread->iac4);
339 #endif
340         mtspr(SPRN_DAC1, thread->dac1);
341         mtspr(SPRN_DAC2, thread->dac2);
342 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
343         mtspr(SPRN_DVC1, thread->dvc1);
344         mtspr(SPRN_DVC2, thread->dvc2);
345 #endif
346         mtspr(SPRN_DBCR0, thread->dbcr0);
347         mtspr(SPRN_DBCR1, thread->dbcr1);
348 #ifdef CONFIG_BOOKE
349         mtspr(SPRN_DBCR2, thread->dbcr2);
350 #endif
351 }
352 /*
353  * If either the old or the new thread is making use of the
354  * debug registers, load the debug registers with the values
355  * stored in the new thread.
356  */
357 static void switch_booke_debug_regs(struct thread_struct *new_thread)
358 {
359         if ((current->thread.dbcr0 & DBCR0_IDM)
360                 || (new_thread->dbcr0 & DBCR0_IDM))
361                         prime_debug_regs(new_thread);
362 }
363 #else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
364 #ifndef CONFIG_HAVE_HW_BREAKPOINT
365 static void set_debug_reg_defaults(struct thread_struct *thread)
366 {
367         if (thread->dabr) {
368                 thread->dabr = 0;
369                 thread->dabrx = 0;
370                 set_dabr(0, 0);
371         }
372 }
373 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
374 #endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
375
376 int set_dabr(unsigned long dabr, unsigned long dabrx)
377 {
378         __get_cpu_var(current_dabr) = dabr;
379
380         if (ppc_md.set_dabr)
381                 return ppc_md.set_dabr(dabr, dabrx);
382
383         /* XXX should we have a CPU_FTR_HAS_DABR ? */
384 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
385         mtspr(SPRN_DAC1, dabr);
386 #ifdef CONFIG_PPC_47x
387         isync();
388 #endif
389 #elif defined(CONFIG_PPC_BOOK3S)
390         mtspr(SPRN_DABR, dabr);
391         mtspr(SPRN_DABRX, dabrx);
392 #endif
393         return 0;
394 }
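/*
 * Illustrative sketch, assuming the DABR_* and DABRX_* bit definitions
 * in asm/reg.h: arming a write watchpoint on an 8-byte-aligned address
 * could look like
 *
 *	set_dabr((addr & ~7UL) | DABR_TRANSLATION | DABR_DATA_WRITE,
 *		 DABRX_USER | DABRX_KERNEL);
 */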
395
396 #ifdef CONFIG_PPC64
397 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
398 #endif
399
400 struct task_struct *__switch_to(struct task_struct *prev,
401         struct task_struct *new)
402 {
403         struct thread_struct *new_thread, *old_thread;
404         unsigned long flags;
405         struct task_struct *last;
406 #ifdef CONFIG_PPC_BOOK3S_64
407         struct ppc64_tlb_batch *batch;
408 #endif
409
410 #ifdef CONFIG_SMP
411         /* avoid complexity of lazy save/restore of fpu
412          * by just saving it every time we switch out if
413          * this task used the fpu during the last quantum.
414          *
415          * If it tries to use the fpu again, it'll trap and
416          * reload its fp regs.  So we don't have to do a restore
417          * every switch, just a save.
418          *  -- Cort
419          */
420         if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
421                 giveup_fpu(prev);
422 #ifdef CONFIG_ALTIVEC
423         /*
424          * If the previous thread used altivec in the last quantum
425          * (thus changing altivec regs) then save them.
426          * We used to check the VRSAVE register but not all apps
427          * set it, so we don't rely on it now (and in fact we need
428          * to save & restore VSCR even if VRSAVE == 0).  -- paulus
429          *
430          * On SMP we always save/restore altivec regs just to avoid the
431          * complexity of changing processors.
432          *  -- Cort
433          */
434         if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
435                 giveup_altivec(prev);
436 #endif /* CONFIG_ALTIVEC */
437 #ifdef CONFIG_VSX
438         if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
439                 /* VMX and FPU registers have already been saved here */
440                 __giveup_vsx(prev);
441 #endif /* CONFIG_VSX */
442 #ifdef CONFIG_SPE
443         /*
444          * If the previous thread used spe in the last quantum
445          * (thus changing spe regs) then save them.
446          *
447          * On SMP we always save/restore spe regs just to avoid the
448          * complexity of changing processors.
449          */
450         if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
451                 giveup_spe(prev);
452 #endif /* CONFIG_SPE */
453
454 #else  /* CONFIG_SMP */
455 #ifdef CONFIG_ALTIVEC
456         /* Avoid the trap.  On smp this never happens since
457          * we don't set last_task_used_altivec -- Cort
458          */
459         if (new->thread.regs && last_task_used_altivec == new)
460                 new->thread.regs->msr |= MSR_VEC;
461 #endif /* CONFIG_ALTIVEC */
462 #ifdef CONFIG_VSX
463         if (new->thread.regs && last_task_used_vsx == new)
464                 new->thread.regs->msr |= MSR_VSX;
465 #endif /* CONFIG_VSX */
466 #ifdef CONFIG_SPE
467         /* Avoid the trap.  On smp this never happens since
468          * we don't set last_task_used_spe
469          */
470         if (new->thread.regs && last_task_used_spe == new)
471                 new->thread.regs->msr |= MSR_SPE;
472 #endif /* CONFIG_SPE */
473
474 #endif /* CONFIG_SMP */
475
476 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
477         switch_booke_debug_regs(&new->thread);
478 #else
479 /*
480  * For PPC_BOOK3S_64, we rely on the hw-breakpoint interfaces to
481  * schedule the DABR.
482  */
483 #ifndef CONFIG_HAVE_HW_BREAKPOINT
484         if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
485                 set_dabr(new->thread.dabr, new->thread.dabrx);
486 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
487 #endif
488
489
490         new_thread = &new->thread;
491         old_thread = &current->thread;
492
493 #ifdef CONFIG_PPC64
494         /*
495          * Collect processor utilization data per process
496          */
497         if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
498                 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
499                 long unsigned start_tb, current_tb;
500                 start_tb = old_thread->start_tb;
501                 cu->current_tb = current_tb = mfspr(SPRN_PURR);
502                 old_thread->accum_tb += (current_tb - start_tb);
503                 new_thread->start_tb = current_tb;
504         }
505 #endif /* CONFIG_PPC64 */
506
507 #ifdef CONFIG_PPC_BOOK3S_64
508         batch = &__get_cpu_var(ppc64_tlb_batch);
509         if (batch->active) {
510                 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
511                 if (batch->index)
512                         __flush_tlb_pending(batch);
513                 batch->active = 0;
514         }
515 #endif /* CONFIG_PPC_BOOK3S_64 */
516
517         local_irq_save(flags);
518
519         /*
520          * We can't take a PMU exception inside _switch() since there is a
521          * window where the kernel stack SLB and the kernel stack are out
522          * of sync. Hard disable here.
523          */
524         hard_irq_disable();
525         last = _switch(old_thread, new_thread);
526
527 #ifdef CONFIG_PPC_BOOK3S_64
528         if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
529                 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
530                 batch = &__get_cpu_var(ppc64_tlb_batch);
531                 batch->active = 1;
532         }
533 #endif /* CONFIG_PPC_BOOK3S_64 */
534
535         local_irq_restore(flags);
536
537         return last;
538 }
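/*
 * __switch_to() is reached via the switch_to() macro in asm/switch_to.h
 * from the scheduler's context_switch(); the _switch() call above (in
 * entry_32.S/entry_64.S) performs the actual stack and non-volatile
 * register switch.
 */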
539
540 static int instructions_to_print = 16;
541
542 static void show_instructions(struct pt_regs *regs)
543 {
544         int i;
545         unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
546                         sizeof(int));
547
548         printk("Instruction dump:");
549
550         for (i = 0; i < instructions_to_print; i++) {
551                 int instr;
552
553                 if (!(i % 8))
554                         printk("\n");
555
556 #if !defined(CONFIG_BOOKE)
557                 /* If executing with the IMMU off, adjust pc rather
558                  * than print XXXXXXXX.
559                  */
560                 if (!(regs->msr & MSR_IR))
561                         pc = (unsigned long)phys_to_virt(pc);
562 #endif
563
564                 /* We use __get_user here *only* to avoid an OOPS on a
565                  * bad address because the pc *should* only be a
566                  * kernel address.
567                  */
568                 if (!__kernel_text_address(pc) ||
569                      __get_user(instr, (unsigned int __user *)pc)) {
570                         printk(KERN_CONT "XXXXXXXX ");
571                 } else {
572                         if (regs->nip == pc)
573                                 printk(KERN_CONT "<%08x> ", instr);
574                         else
575                                 printk(KERN_CONT "%08x ", instr);
576                 }
577
578                 pc += sizeof(int);
579         }
580
581         printk("\n");
582 }
583
584 static struct regbit {
585         unsigned long bit;
586         const char *name;
587 } msr_bits[] = {
588 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
589         {MSR_SF,        "SF"},
590         {MSR_HV,        "HV"},
591 #endif
592         {MSR_VEC,       "VEC"},
593         {MSR_VSX,       "VSX"},
594 #ifdef CONFIG_BOOKE
595         {MSR_CE,        "CE"},
596 #endif
597         {MSR_EE,        "EE"},
598         {MSR_PR,        "PR"},
599         {MSR_FP,        "FP"},
600         {MSR_ME,        "ME"},
601 #ifdef CONFIG_BOOKE
602         {MSR_DE,        "DE"},
603 #else
604         {MSR_SE,        "SE"},
605         {MSR_BE,        "BE"},
606 #endif
607         {MSR_IR,        "IR"},
608         {MSR_DR,        "DR"},
609         {MSR_PMM,       "PMM"},
610 #ifndef CONFIG_BOOKE
611         {MSR_RI,        "RI"},
612         {MSR_LE,        "LE"},
613 #endif
614         {0,             NULL}
615 };
616
617 static void printbits(unsigned long val, struct regbit *bits)
618 {
619         const char *sep = "";
620
621         printk("<");
622         for (; bits->bit; ++bits)
623                 if (val & bits->bit) {
624                         printk("%s%s", sep, bits->name);
625                         sep = ",";
626                 }
627         printk(">");
628 }
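/*
 * Example output for a typical 64-bit kernel-mode MSR: something like
 * <SF,HV,EE,ME,IR,DR,RI> -- one name per set bit, in table order.
 */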
629
630 #ifdef CONFIG_PPC64
631 #define REG             "%016lx"
632 #define REGS_PER_LINE   4
633 #define LAST_VOLATILE   13
634 #else
635 #define REG             "%08lx"
636 #define REGS_PER_LINE   8
637 #define LAST_VOLATILE   12
638 #endif
639
640 void show_regs(struct pt_regs * regs)
641 {
642         int i, trap;
643
644         printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
645                regs->nip, regs->link, regs->ctr);
646         printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
647                regs, regs->trap, print_tainted(), init_utsname()->release);
648         printk("MSR: "REG" ", regs->msr);
649         printbits(regs->msr, msr_bits);
650         printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
651 #ifdef CONFIG_PPC64
652         printk("SOFTE: %ld\n", regs->softe);
653 #endif
654         trap = TRAP(regs);
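        /*
         * For non-syscall traps the orig_gpr3 save slot is reused to
         * hold the CFAR (come-from address register), which is why
         * syscalls (trap 0xc00) are excluded below.
         */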
655         if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
656                 printk("CFAR: "REG"\n", regs->orig_gpr3);
657         if (trap == 0x300 || trap == 0x600)
658 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
659                 printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
660 #else
661                 printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
662 #endif
663         printk("TASK = %p[%d] '%s' THREAD: %p",
664                current, task_pid_nr(current), current->comm, task_thread_info(current));
665
666 #ifdef CONFIG_SMP
667         printk(" CPU: %d", raw_smp_processor_id());
668 #endif /* CONFIG_SMP */
669
670         for (i = 0;  i < 32;  i++) {
671                 if ((i % REGS_PER_LINE) == 0)
672                         printk("\nGPR%02d: ", i);
673                 printk(REG " ", regs->gpr[i]);
674                 if (i == LAST_VOLATILE && !FULL_REGS(regs))
675                         break;
676         }
677         printk("\n");
678 #ifdef CONFIG_KALLSYMS
679         /*
680          * Lookup NIP late so we have the best chance of getting the
681          * above info out without failing
682          */
683         printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
684         printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
685 #endif
686         show_stack(current, (unsigned long *) regs->gpr[1]);
687         if (!user_mode(regs))
688                 show_instructions(regs);
689 }
690
691 void exit_thread(void)
692 {
693         discard_lazy_cpu_state();
694 }
695
696 void flush_thread(void)
697 {
698         discard_lazy_cpu_state();
699
700 #ifdef CONFIG_HAVE_HW_BREAKPOINT
701         flush_ptrace_hw_breakpoint(current);
702 #else /* CONFIG_HAVE_HW_BREAKPOINT */
703         set_debug_reg_defaults(&current->thread);
704 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
705 }
706
707 void
708 release_thread(struct task_struct *t)
709 {
710 }
711
712 /*
713  * This is called so that we can store coprocessor state into memory and
714  * copy the current task into the new thread.
715  */
716 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
717 {
718         flush_fp_to_thread(src);
719         flush_altivec_to_thread(src);
720         flush_vsx_to_thread(src);
721         flush_spe_to_thread(src);
722 #ifdef CONFIG_HAVE_HW_BREAKPOINT
723         flush_ptrace_hw_breakpoint(src);
724 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
725
726         *dst = *src;
727         return 0;
728 }
729
730 /*
731  * Copy a thread.
732  */
733 extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
734
735 int copy_thread(unsigned long clone_flags, unsigned long usp,
736                 unsigned long unused, struct task_struct *p,
737                 struct pt_regs *regs)
738 {
739         struct pt_regs *childregs, *kregs;
740         extern void ret_from_fork(void);
741         unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
742
743         CHECK_FULL_REGS(regs);
744         /* Copy registers */
745         sp -= sizeof(struct pt_regs);
746         childregs = (struct pt_regs *) sp;
747         *childregs = *regs;
748         if ((childregs->msr & MSR_PR) == 0) {
749                 /* for kernel thread, set `current' and stackptr in new task */
750                 childregs->gpr[1] = sp + sizeof(struct pt_regs);
751 #ifdef CONFIG_PPC32
752                 childregs->gpr[2] = (unsigned long) p;
753 #else
754                 clear_tsk_thread_flag(p, TIF_32BIT);
755 #endif
756                 p->thread.regs = NULL;  /* no user register state */
757         } else {
758                 childregs->gpr[1] = usp;
759                 p->thread.regs = childregs;
760                 if (clone_flags & CLONE_SETTLS) {
761 #ifdef CONFIG_PPC64
762                         if (!is_32bit_task())
763                                 childregs->gpr[13] = childregs->gpr[6];
764                         else
765 #endif
766                                 childregs->gpr[2] = childregs->gpr[6];
767                 }
768         }
769         childregs->gpr[3] = 0;  /* Result from fork() */
770         sp -= STACK_FRAME_OVERHEAD;
771
772         /*
773          * The way this works is that at some point in the future
774          * some task will call _switch to switch to the new task.
775          * That will pop off the stack frame created below and start
776          * the new task running at ret_from_fork.  The new task will
777          * do some housekeeping and then return from the fork or clone
778          * system call, using the stack frame created above.
779          */
780         sp -= sizeof(struct pt_regs);
781         kregs = (struct pt_regs *) sp;
782         sp -= STACK_FRAME_OVERHEAD;
783         p->thread.ksp = sp;
784         p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
785                                 _ALIGN_UP(sizeof(struct thread_info), 16);
786
787 #ifdef CONFIG_PPC_STD_MMU_64
788         if (mmu_has_feature(MMU_FTR_SLB)) {
789                 unsigned long sp_vsid;
790                 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
791
792                 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
793                         sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
794                                 << SLB_VSID_SHIFT_1T;
795                 else
796                         sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
797                                 << SLB_VSID_SHIFT;
798                 sp_vsid |= SLB_VSID_KERNEL | llp;
799                 p->thread.ksp_vsid = sp_vsid;
800         }
801 #endif /* CONFIG_PPC_STD_MMU_64 */
802 #ifdef CONFIG_PPC64 
803         if (cpu_has_feature(CPU_FTR_DSCR)) {
804                 p->thread.dscr_inherit = current->thread.dscr_inherit;
805                 p->thread.dscr = current->thread.dscr;
806         }
807 #endif
808
809         /*
810          * The PPC64 ABI makes use of function descriptors to represent
811          * function pointers.  The symbol (ret_from_fork) is actually a
812          * pointer to such a descriptor; its first entry is the address
813          * of the actual function.
814          */
815 #ifdef CONFIG_PPC64
816         kregs->nip = *((unsigned long *)ret_from_fork);
817 #else
818         kregs->nip = (unsigned long)ret_from_fork;
819 #endif
820
821         return 0;
822 }
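/*
 * For reference, the child's kernel stack as laid out above (top of
 * stack first, addresses decreasing):
 *
 *	task_stack_page(p) + THREAD_SIZE
 *	  childregs        (struct pt_regs -- user or kernel regs image)
 *	  STACK_FRAME_OVERHEAD
 *	  kregs            (struct pt_regs -- kregs->nip = ret_from_fork)
 *	  STACK_FRAME_OVERHEAD   <- p->thread.ksp
 */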
823
824 /*
825  * Set up a thread for executing a new program
826  */
827 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
828 {
829 #ifdef CONFIG_PPC64
830         unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
831 #endif
832
833         /*
834          * If we exec out of a kernel thread then thread.regs will not be
835          * set.  Do it now.
836          */
837         if (!current->thread.regs) {
838                 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
839                 current->thread.regs = regs - 1;
840         }
841
842         memset(regs->gpr, 0, sizeof(regs->gpr));
843         regs->ctr = 0;
844         regs->link = 0;
845         regs->xer = 0;
846         regs->ccr = 0;
847         regs->gpr[1] = sp;
848
849         /*
850          * We have just cleared all the nonvolatile GPRs, so make
851          * FULL_REGS(regs) return true.  This is necessary to allow
852          * ptrace to examine the thread immediately after exec.
853          */
854         regs->trap &= ~1UL;
855
856 #ifdef CONFIG_PPC32
857         regs->mq = 0;
858         regs->nip = start;
859         regs->msr = MSR_USER;
860 #else
861         if (!is_32bit_task()) {
862                 unsigned long entry, toc;
863
864                 /* start is a relocated pointer to the function descriptor for
865                  * the elf _start routine.  The first entry in the function
866                  * descriptor is the entry address of _start and the second
867                  * entry is the TOC value we need to use.
868                  */
869                 __get_user(entry, (unsigned long __user *)start);
870                 __get_user(toc, (unsigned long __user *)start+1);
871
872                 /* Check whether the e_entry function descriptor entries
873                  * need to be relocated before we can use them.
874                  */
875                 if (load_addr != 0) {
876                         entry += load_addr;
877                         toc   += load_addr;
878                 }
879                 regs->nip = entry;
880                 regs->gpr[2] = toc;
881                 regs->msr = MSR_USER64;
882         } else {
883                 regs->nip = start;
884                 regs->gpr[2] = 0;
885                 regs->msr = MSR_USER32;
886         }
887 #endif
888
889         discard_lazy_cpu_state();
890 #ifdef CONFIG_VSX
891         current->thread.used_vsr = 0;
892 #endif
893         memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
894         current->thread.fpscr.val = 0;
895 #ifdef CONFIG_ALTIVEC
896         memset(current->thread.vr, 0, sizeof(current->thread.vr));
897         memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
898         current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
899         current->thread.vrsave = 0;
900         current->thread.used_vr = 0;
901 #endif /* CONFIG_ALTIVEC */
902 #ifdef CONFIG_SPE
903         memset(current->thread.evr, 0, sizeof(current->thread.evr));
904         current->thread.acc = 0;
905         current->thread.spefscr = 0;
906         current->thread.used_spe = 0;
907 #endif /* CONFIG_SPE */
908 }
909
910 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
911                 | PR_FP_EXC_RES | PR_FP_EXC_INV)
912
913 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
914 {
915         struct pt_regs *regs = tsk->thread.regs;
916
917         /* This is a bit hairy.  If we are an SPE-enabled processor
918          * (have embedded fp) we store the IEEE exception enable flags in
919          * fpexc_mode.  fpexc_mode is also used for setting FP exception
920          * mode (async, precise, disabled) for 'Classic' FP. */
921         if (val & PR_FP_EXC_SW_ENABLE) {
922 #ifdef CONFIG_SPE
923                 if (cpu_has_feature(CPU_FTR_SPE)) {
924                         tsk->thread.fpexc_mode = val &
925                                 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
926                         return 0;
927                 } else {
928                         return -EINVAL;
929                 }
930 #else
931                 return -EINVAL;
932 #endif
933         }
934
935         /* On a CONFIG_SPE kernel this does not hurt us.  The bits that
936          * __pack_fe01 use do not overlap with bits used for
937          * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
938          * on CONFIG_SPE implementations are reserved so writing to
939          * them does not change anything */
940         if (val > PR_FP_EXC_PRECISE)
941                 return -EINVAL;
942         tsk->thread.fpexc_mode = __pack_fe01(val);
943         if (regs != NULL && (regs->msr & MSR_FP) != 0)
944                 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
945                         | tsk->thread.fpexc_mode;
946         return 0;
947 }
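/*
 * Reached from userspace via prctl(2); e.g. a process requesting
 * precise FP exception mode would call
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 */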
948
949 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
950 {
951         unsigned int val;
952
953         if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
954 #ifdef CONFIG_SPE
955                 if (cpu_has_feature(CPU_FTR_SPE))
956                         val = tsk->thread.fpexc_mode;
957                 else
958                         return -EINVAL;
959 #else
960                 return -EINVAL;
961 #endif
962         else
963                 val = __unpack_fe01(tsk->thread.fpexc_mode);
964         return put_user(val, (unsigned int __user *) adr);
965 }
966
967 int set_endian(struct task_struct *tsk, unsigned int val)
968 {
969         struct pt_regs *regs = tsk->thread.regs;
970
971         if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
972             (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
973                 return -EINVAL;
974
975         if (regs == NULL)
976                 return -EINVAL;
977
978         if (val == PR_ENDIAN_BIG)
979                 regs->msr &= ~MSR_LE;
980         else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
981                 regs->msr |= MSR_LE;
982         else
983                 return -EINVAL;
984
985         return 0;
986 }
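/*
 * Likewise reached via prctl(2), e.g.
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *
 * which fails with -EINVAL on CPUs lacking CPU_FTR_REAL_LE.
 */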
987
988 int get_endian(struct task_struct *tsk, unsigned long adr)
989 {
990         struct pt_regs *regs = tsk->thread.regs;
991         unsigned int val;
992
993         if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
994             !cpu_has_feature(CPU_FTR_REAL_LE))
995                 return -EINVAL;
996
997         if (regs == NULL)
998                 return -EINVAL;
999
1000         if (regs->msr & MSR_LE) {
1001                 if (cpu_has_feature(CPU_FTR_REAL_LE))
1002                         val = PR_ENDIAN_LITTLE;
1003                 else
1004                         val = PR_ENDIAN_PPC_LITTLE;
1005         } else
1006                 val = PR_ENDIAN_BIG;
1007
1008         return put_user(val, (unsigned int __user *)adr);
1009 }
1010
1011 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1012 {
1013         tsk->thread.align_ctl = val;
1014         return 0;
1015 }
1016
1017 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1018 {
1019         return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1020 }
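/*
 * These back PR_SET_UNALIGN/PR_GET_UNALIGN, e.g.
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 *
 * to have unaligned accesses raise SIGBUS rather than be fixed up
 * (the alignment handler consults thread.align_ctl).
 */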
1021
1022 #define TRUNC_PTR(x)    ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
1023
1024 int sys_clone(unsigned long clone_flags, unsigned long usp,
1025               int __user *parent_tidp, void __user *child_threadptr,
1026               int __user *child_tidp, int p6,
1027               struct pt_regs *regs)
1028 {
1029         CHECK_FULL_REGS(regs);
1030         if (usp == 0)
1031                 usp = regs->gpr[1];     /* stack pointer for child */
1032 #ifdef CONFIG_PPC64
1033         if (is_32bit_task()) {
1034                 parent_tidp = TRUNC_PTR(parent_tidp);
1035                 child_tidp = TRUNC_PTR(child_tidp);
1036         }
1037 #endif
1038         return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
1039 }
1040
1041 int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
1042              unsigned long p4, unsigned long p5, unsigned long p6,
1043              struct pt_regs *regs)
1044 {
1045         CHECK_FULL_REGS(regs);
1046         return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
1047 }
1048
1049 int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
1050               unsigned long p4, unsigned long p5, unsigned long p6,
1051               struct pt_regs *regs)
1052 {
1053         CHECK_FULL_REGS(regs);
1054         return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
1055                         regs, 0, NULL, NULL);
1056 }
1057
1058 int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
1059                unsigned long a3, unsigned long a4, unsigned long a5,
1060                struct pt_regs *regs)
1061 {
1062         int error;
1063         char *filename;
1064
1065         filename = getname((const char __user *) a0);
1066         error = PTR_ERR(filename);
1067         if (IS_ERR(filename))
1068                 goto out;
1069         flush_fp_to_thread(current);
1070         flush_altivec_to_thread(current);
1071         flush_spe_to_thread(current);
1072         error = do_execve(filename,
1073                           (const char __user *const __user *) a1,
1074                           (const char __user *const __user *) a2, regs);
1075         putname(filename);
1076 out:
1077         return error;
1078 }
1079
1080 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1081                                   unsigned long nbytes)
1082 {
1083         unsigned long stack_page;
1084         unsigned long cpu = task_cpu(p);
1085
1086         /*
1087          * Avoid crashing if the stack has overflowed and corrupted
1088          * task_cpu(p), which is in the thread_info struct.
1089          */
1090         if (cpu < NR_CPUS && cpu_possible(cpu)) {
1091                 stack_page = (unsigned long) hardirq_ctx[cpu];
1092                 if (sp >= stack_page + sizeof(struct thread_struct)
1093                     && sp <= stack_page + THREAD_SIZE - nbytes)
1094                         return 1;
1095
1096                 stack_page = (unsigned long) softirq_ctx[cpu];
1097                 if (sp >= stack_page + sizeof(struct thread_struct)
1098                     && sp <= stack_page + THREAD_SIZE - nbytes)
1099                         return 1;
1100         }
1101         return 0;
1102 }
1103
1104 int validate_sp(unsigned long sp, struct task_struct *p,
1105                        unsigned long nbytes)
1106 {
1107         unsigned long stack_page = (unsigned long)task_stack_page(p);
1108
1109         if (sp >= stack_page + sizeof(struct thread_struct)
1110             && sp <= stack_page + THREAD_SIZE - nbytes)
1111                 return 1;
1112
1113         return valid_irq_stack(sp, p, nbytes);
1114 }
1115
1116 EXPORT_SYMBOL(validate_sp);
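/*
 * Example: the stack walkers below call
 *
 *	validate_sp(sp, tsk, STACK_FRAME_OVERHEAD);
 *
 * before dereferencing each frame, so a corrupted back-chain ends the
 * walk instead of faulting.
 */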
1117
1118 unsigned long get_wchan(struct task_struct *p)
1119 {
1120         unsigned long ip, sp;
1121         int count = 0;
1122
1123         if (!p || p == current || p->state == TASK_RUNNING)
1124                 return 0;
1125
1126         sp = p->thread.ksp;
1127         if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1128                 return 0;
1129
1130         do {
1131                 sp = *(unsigned long *)sp;
1132                 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1133                         return 0;
1134                 if (count > 0) {
1135                         ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
1136                         if (!in_sched_functions(ip))
1137                                 return ip;
1138                 }
1139         } while (count++ < 16);
1140         return 0;
1141 }
1142
1143 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
1144
1145 void show_stack(struct task_struct *tsk, unsigned long *stack)
1146 {
1147         unsigned long sp, ip, lr, newsp;
1148         int count = 0;
1149         int firstframe = 1;
1150 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1151         int curr_frame = current->curr_ret_stack;
1152         extern void return_to_handler(void);
1153         unsigned long rth = (unsigned long)return_to_handler;
1154         unsigned long mrth = -1;
1155 #ifdef CONFIG_PPC64
1156         extern void mod_return_to_handler(void);
1157         rth = *(unsigned long *)rth;
1158         mrth = (unsigned long)mod_return_to_handler;
1159         mrth = *(unsigned long *)mrth;
1160 #endif
1161 #endif
1162
1163         sp = (unsigned long) stack;
1164         if (tsk == NULL)
1165                 tsk = current;
1166         if (sp == 0) {
1167                 if (tsk == current)
1168                         asm("mr %0,1" : "=r" (sp));
1169                 else
1170                         sp = tsk->thread.ksp;
1171         }
1172
1173         lr = 0;
1174         printk("Call Trace:\n");
1175         do {
1176                 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
1177                         return;
1178
1179                 stack = (unsigned long *) sp;
1180                 newsp = stack[0];
1181                 ip = stack[STACK_FRAME_LR_SAVE];
1182                 if (!firstframe || ip != lr) {
1183                         printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1184 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1185                         if ((ip == rth || ip == mrth) && curr_frame >= 0) {
1186                                 printk(" (%pS)",
1187                                        (void *)current->ret_stack[curr_frame].ret);
1188                                 curr_frame--;
1189                         }
1190 #endif
1191                         if (firstframe)
1192                                 printk(" (unreliable)");
1193                         printk("\n");
1194                 }
1195                 firstframe = 0;
1196
1197                 /*
1198                  * See if this is an exception frame.
1199                  * We look for the "regshere" marker in the current frame.
1200                  */
1201                 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1202                     && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
1203                         struct pt_regs *regs = (struct pt_regs *)
1204                                 (sp + STACK_FRAME_OVERHEAD);
1205                         lr = regs->link;
1206                         printk("--- Exception: %lx at %pS\n    LR = %pS\n",
1207                                regs->trap, (void *)regs->nip, (void *)lr);
1208                         firstframe = 1;
1209                 }
1210
1211                 sp = newsp;
1212         } while (count++ < kstack_depth_to_print);
1213 }
1214
1215 void dump_stack(void)
1216 {
1217         show_stack(current, NULL);
1218 }
1219 EXPORT_SYMBOL(dump_stack);
1220
1221 #ifdef CONFIG_PPC64
1222 /* Called with hard IRQs off */
1223 void __ppc64_runlatch_on(void)
1224 {
1225         struct thread_info *ti = current_thread_info();
1226         unsigned long ctrl;
1227
1228         ctrl = mfspr(SPRN_CTRLF);
1229         ctrl |= CTRL_RUNLATCH;
1230         mtspr(SPRN_CTRLT, ctrl);
1231
1232         ti->local_flags |= _TLF_RUNLATCH;
1233 }
1234
1235 /* Called with hard IRQs off */
1236 void __ppc64_runlatch_off(void)
1237 {
1238         struct thread_info *ti = current_thread_info();
1239         unsigned long ctrl;
1240
1241         ti->local_flags &= ~_TLF_RUNLATCH;
1242
1243         ctrl = mfspr(SPRN_CTRLF);
1244         ctrl &= ~CTRL_RUNLATCH;
1245         mtspr(SPRN_CTRLT, ctrl);
1246 }
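/*
 * These are wrapped by the ppc64_runlatch_on()/ppc64_runlatch_off()
 * helpers in asm/runlatch.h; the idle loop uses them to tell the
 * hardware whether this thread is doing useful work or just spinning.
 */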
1247 #endif /* CONFIG_PPC64 */
1248
1249 unsigned long arch_align_stack(unsigned long sp)
1250 {
1251         if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1252                 sp -= get_random_int() & ~PAGE_MASK;
1253         return sp & ~0xf;
1254 }
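/*
 * Worked example, assuming 4K pages: get_random_int() & ~PAGE_MASK
 * yields 0..4095, so the stack top moves down by up to one page before
 * being rounded to a 16-byte boundary by the final & ~0xf.
 */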
1255
1256 static inline unsigned long brk_rnd(void)
1257 {
1258         unsigned long rnd = 0;
1259
1260         /* 8MB for 32bit, 1GB for 64bit */
1261         if (is_32bit_task())
1262                 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1263         else
1264                 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1265
1266         return rnd << PAGE_SHIFT;
1267 }
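/*
 * Worked example, assuming PAGE_SHIFT == 12 (4K pages): a 32-bit task
 * draws from 1 << 11 pages, i.e. up to 8MB once shifted left by
 * PAGE_SHIFT; a 64-bit task draws from 1 << 18 pages, i.e. up to 1GB.
 */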
1268
1269 unsigned long arch_randomize_brk(struct mm_struct *mm)
1270 {
1271         unsigned long base = mm->brk;
1272         unsigned long ret;
1273
1274 #ifdef CONFIG_PPC_STD_MMU_64
1275         /*
1276          * If we are using 1TB segments and we are allowed to randomise
1277          * the heap, we can put it above 1TB so it is backed by a 1TB
1278          * segment. Otherwise the heap will be in the bottom 1TB
1279          * which always uses 256MB segments and this may result in a
1280          * performance penalty.
1281          */
1282         if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1283                 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1284 #endif
1285
1286         ret = PAGE_ALIGN(base + brk_rnd());
1287
1288         if (ret < mm->brk)
1289                 return mm->brk;
1290
1291         return ret;
1292 }
1293
1294 unsigned long randomize_et_dyn(unsigned long base)
1295 {
1296         unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1297
1298         if (ret < base)
1299                 return base;
1300
1301         return ret;
1302 }