1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
9 * I like traps on v9, :))))
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
21 #include <linux/init.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
40 #include <asm/kdebug.h>
42 #include <linux/kmod.h>
/* Chain of callbacks to notify on fatal trap ("die") events. */
45 struct notifier_block *sparc64die_chain;
/* Serializes registration on the chain above. */
46 static DEFINE_SPINLOCK(die_notifier_lock);
/* Register @nb on the sparc64 die notifier chain under the chain lock.
 * irqsave is used because traps can occur in any context.
 * NOTE(review): fragment — local declarations and the return are elided here.
 */
48 int register_die_notifier(struct notifier_block *nb)
52 spin_lock_irqsave(&die_notifier_lock, flags);
53 err = notifier_chain_register(&sparc64die_chain, nb);
54 spin_unlock_irqrestore(&die_notifier_lock, flags);
58 /* When an irrecoverable trap occurs at tl > 0, the trap entry
59 * code logs the trap state registers at every level in the trap
60 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
/* Dump the per-trap-level state (TSTATE/TPC/TNPC/TT) that the tl>0 trap
 * entry code recorded.  Callers pass the log that sits directly after the
 * pt_regs area, i.e. (struct tl1_traplog *)(regs + 1).
 * NOTE(review): fragment — declarations and braces are elided in this view.
 */
73 static void dump_tl1_traplog(struct tl1_traplog *p)
/* Message typo fixed: "track stack" -> "trap stack" (this dumps p->trapstack). */
77 printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
/* Four architectural trap levels are logged. */
79 for (i = 0; i < 4; i++) {
81 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
82 "TNPC[%016lx] TT[%lx]\n",
84 p->trapstack[i].tstate, p->trapstack[i].tpc,
85 p->trapstack[i].tnpc, p->trapstack[i].tt);
/* "call debug" trap entry: just broadcast a DIE_CALL notifier event. */
89 void do_call_debug(struct pt_regs *regs)
91 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
/* Handler for unexpected traps taken at trap level 0.
 * Hardware bad traps and kernel-mode software bad traps are fatal
 * (die_if_kernel); user-mode ones raise SIGILL/ILL_ILLTRP with
 * si_trapno = lvl and si_addr = faulting PC.
 * NOTE(review): fragment — declarations, returns and some braces elided.
 */
94 void bad_trap(struct pt_regs *regs, long lvl)
99 if (notify_die(DIE_TRAP, "bad trap", regs,
100 0, lvl, SIGTRAP) == NOTIFY_STOP)
104 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
105 die_if_kernel(buffer, regs);
109 if (regs->tstate & TSTATE_PRIV) {
110 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
111 die_if_kernel(buffer, regs);
/* 32-bit tasks: trim PC/NPC to 32 bits before reporting to userspace. */
113 if (test_thread_flag(TIF_32BIT)) {
114 regs->tpc &= 0xffffffff;
115 regs->tnpc &= 0xffffffff;
117 info.si_signo = SIGILL;
119 info.si_code = ILL_ILLTRP;
120 info.si_addr = (void __user *)regs->tpc;
121 info.si_trapno = lvl;
122 force_sig_info(SIGILL, &info, current);
/* Bad trap taken at trap level > 0: dump the traplog recorded after
 * pt_regs, then treat it as fatal in the kernel.
 * NOTE(review): fragment — declarations/braces elided.
 */
125 void bad_trap_tl1(struct pt_regs *regs, long lvl)
129 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
130 0, lvl, SIGTRAP) == NOTIFY_STOP)
133 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
135 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
136 die_if_kernel (buffer, regs);
139 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Report the file/line of a BUG() site (verbose-BUG builds only).
 * NOTE(review): fragment — body continuation elided.
 */
140 void do_BUG(const char *file, int line)
143 printk("kernel BUG at %s:%d!\n", file, line);
/* Spitfire instruction access exception (tt 0x8).
 * Fatal in kernel mode ("Iax"); user mode gets SIGSEGV/SEGV_MAPERR at
 * the faulting PC.  @sfsr/@sfar are the synchronous fault status/address.
 * NOTE(review): fragment — declarations/braces elided.
 */
147 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
151 if (notify_die(DIE_TRAP, "instruction access exception", regs,
152 0, 0x8, SIGTRAP) == NOTIFY_STOP)
155 if (regs->tstate & TSTATE_PRIV) {
156 printk("spitfire_insn_access_exception: SFSR[%016lx] "
157 "SFAR[%016lx], going.\n", sfsr, sfar);
158 die_if_kernel("Iax", regs);
/* 32-bit tasks: trim PC/NPC to 32 bits before reporting. */
160 if (test_thread_flag(TIF_32BIT)) {
161 regs->tpc &= 0xffffffff;
162 regs->tnpc &= 0xffffffff;
164 info.si_signo = SIGSEGV;
166 info.si_code = SEGV_MAPERR;
167 info.si_addr = (void __user *)regs->tpc;
169 force_sig_info(SIGSEGV, &info, current);
/* tl>0 variant: dump the traplog, then fall through to the tl0 handler. */
172 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
174 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
175 0, 0x8, SIGTRAP) == NOTIFY_STOP)
178 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
179 spitfire_insn_access_exception(regs, sfsr, sfar);
/* Spitfire data access exception (tt 0x30).
 * Kernel faults from uaccess code are redirected to their exception-table
 * fixup; other kernel faults die ("Dax").  User mode gets SIGSEGV with
 * si_addr = SFAR (the faulting data address).
 * NOTE(review): fragment — declarations/braces elided.
 */
182 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
186 if (notify_die(DIE_TRAP, "data access exception", regs,
187 0, 0x30, SIGTRAP) == NOTIFY_STOP)
190 if (regs->tstate & TSTATE_PRIV) {
191 /* Test if this comes from uaccess places. */
193 unsigned long g2 = regs->u_regs[UREG_G2];
195 if ((fixup = search_extables_range(regs->tpc, &g2))) {
196 /* Ouch, somebody is trying ugly VM hole tricks on us... */
197 #ifdef DEBUG_EXCEPTIONS
198 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
199 printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
200 "g2<%016lx>\n", regs->tpc, fixup, g2);
/* Resume at the fixup: NPC follows TPC; restore the fixup's %g2. */
203 regs->tnpc = regs->tpc + 4;
204 regs->u_regs[UREG_G2] = g2;
208 printk("spitfire_data_access_exception: SFSR[%016lx] "
209 "SFAR[%016lx], going.\n", sfsr, sfar);
210 die_if_kernel("Dax", regs);
213 info.si_signo = SIGSEGV;
215 info.si_code = SEGV_MAPERR;
216 info.si_addr = (void __user *)sfar;
218 force_sig_info(SIGSEGV, &info, current);
/* tl>0 variant: dump the traplog, then fall through to the tl0 handler. */
221 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
223 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
224 0, 0x30, SIGTRAP) == NOTIFY_STOP)
227 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
228 spitfire_data_access_exception(regs, sfsr, sfar);
232 /* This is really pathetic... */
/* Shared with the PCI poke code: lets the access-error handler detect a
 * fault caused by an in-progress PCI config-space probe and report it
 * instead of treating it as a fatal error.
 */
233 extern volatile int pci_poke_in_progress;
234 extern volatile int pci_poke_cpu;
235 extern volatile int pci_poke_faulted;
238 /* When access exceptions happen, we must do this. */
/* Invalidate all I-/D-cache tags via diagnostic stores, then re-enable
 * the caches in the LSU control register.  Only meaningful on Spitfire.
 * NOTE(review): fragment — declarations/braces and part of the asm elided.
 */
239 static void spitfire_clean_and_reenable_l1_caches(void)
243 if (tlb_type != spitfire)
/* Walk every 32-byte tag line of both caches and zero the tags. */
247 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
248 spitfire_put_icache_tag(va, 0x0);
249 spitfire_put_dcache_tag(va, 0x0);
252 /* Re-enable in LSU. */
253 __asm__ __volatile__("flush %%g6\n\t"
255 "stxa %0, [%%g0] %1\n\t"
258 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
259 LSU_CONTROL_IM | LSU_CONTROL_DM),
260 "i" (ASI_LSU_CONTROL)
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * ESTATE error-enable register.
 * NOTE(review): fragment — part of the asm statement elided.
 */
264 static void spitfire_enable_estate_errors(void)
266 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
269 : "r" (ESTATE_ERR_ALL),
270 "i" (ASI_ESTATE_ERROR_EN));
/* Maps an 8-bit UDB ECC syndrome to an encoded bit/error code, consumed
 * by spitfire_log_udb_syndrome() below via prom_getunumber().
 */
273 static char ecc_syndrome_table[] = {
274 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
275 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
276 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
277 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
278 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
279 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
280 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
281 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
282 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
283 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
284 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
285 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
286 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
287 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
288 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
289 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
290 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
291 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
292 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
293 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
294 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
295 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
296 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
297 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
298 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
299 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
300 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
301 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
302 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
303 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
304 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
305 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
/* Fallback memory-module string when the PROM cannot resolve a syndrome. */
308 static char *syndrome_unknown = "<Unknown>";
/* Decode and log the memory module implicated by UDBL/UDBH ECC syndromes.
 * The syndrome byte indexes ecc_syndrome_table; prom_getunumber() turns
 * it plus the AFAR into a module name (or syndrome_unknown on failure).
 * NOTE(review): fragment — the guards that test @bit in udbl/udbh are elided.
 */
310 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
312 unsigned short scode;
313 char memmod_str[64], *p;
316 scode = ecc_syndrome_table[udbl & 0xff];
317 if (prom_getunumber(scode, afar,
318 memmod_str, sizeof(memmod_str)) == -1)
319 p = syndrome_unknown;
322 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
323 "Memory Module \"%s\"\n",
324 smp_processor_id(), scode, p);
/* Same again for the high UDB. */
328 scode = ecc_syndrome_table[udbh & 0xff];
329 if (prom_getunumber(scode, afar,
330 memmod_str, sizeof(memmod_str)) == -1)
331 p = syndrome_unknown;
334 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
335 "Memory Module \"%s\"\n",
336 smp_processor_id(), scode, p);
/* Log a Spitfire Correctable ECC Error (CEE).  Purely informational:
 * after logging we only re-arm ESTATE error reporting, since a CEE trap
 * does not disable the I-/D-caches.
 */
341 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
344 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
345 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
346 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
348 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
350 /* We always log it, even if someone is listening for this
353 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
354 0, TRAP_TYPE_CEE, SIGTRAP);
356 /* The Correctable ECC Error trap does not disable I/D caches. So
357 * we only have to restore the ESTATE Error Enable register.
359 spitfire_enable_estate_errors();
/* Log and handle a Spitfire Uncorrectable Error (UE).
 * Kernel-mode UEs are fatal (die_if_kernel("UE")); user mode gets
 * SIGBUS/BUS_OBJERR after the L1 caches are cleaned and E-state error
 * reporting is re-armed.
 * NOTE(review): fragment — declarations and some braces are elided here.
 */
362 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
366 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
/* Format fix: udbh is unsigned long and every sibling field uses %lx;
 * it was previously printed with %ld. */
367 "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
368 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
370 /* XXX add more human friendly logging of the error status
371 * XXX as is implemented for cheetah
374 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
376 /* We always log it, even if someone is listening for this
379 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
/* Kernel-mode UEs: dump the tl>0 traplog (when applicable) and die. */
382 if (regs->tstate & TSTATE_PRIV) {
384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
385 die_if_kernel("UE", regs);
388 /* XXX need more intelligent processing here, such as is implemented
389 * XXX for cheetah errors, in fact if the E-cache still holds the
390 * XXX line with bad parity this will loop
393 spitfire_clean_and_reenable_l1_caches();
394 spitfire_enable_estate_errors();
/* 32-bit tasks: trim PC/NPC to 32 bits before signalling. */
396 if (test_thread_flag(TIF_32BIT)) {
397 regs->tpc &= 0xffffffff;
398 regs->tnpc &= 0xffffffff;
400 info.si_signo = SIGBUS;
402 info.si_code = BUS_OBJERR;
403 info.si_addr = (void *)0;
405 force_sig_info(SIGBUS, &info, current);
/* Top-level Spitfire access-error dispatcher.  @status_encoded packs
 * AFSR, trap type, TL>1 flag and the UDB status registers; unpack them,
 * special-case faults caused by an in-progress PCI poke, then route to
 * the UE and/or CEE loggers.
 * NOTE(review): fragment — some declarations/returns/braces elided.
 */
408 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
410 unsigned long afsr, tt, udbh, udbl;
413 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
414 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
415 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
416 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
417 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
/* A data-access error during a PCI config poke on this CPU is expected:
 * flag it for the poke code and skip the faulting instruction. */
420 if (tt == TRAP_TYPE_DAE &&
421 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
422 spitfire_clean_and_reenable_l1_caches();
423 spitfire_enable_estate_errors();
425 pci_poke_faulted = 1;
426 regs->tnpc = regs->tpc + 4;
431 if (afsr & SFAFSR_UE)
432 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
434 if (tt == TRAP_TYPE_CEE) {
435 /* Handle the case where we took a CEE trap, but ACK'd
436 * only the UE state in the UDB error registers.
/* Ack any remaining CE state in UDBH/UDBL by writing it back. */
438 if (afsr & SFAFSR_UE) {
439 if (udbh & UDBE_CE) {
440 __asm__ __volatile__(
441 "stxa %0, [%1] %2\n\t"
444 : "r" (udbh & UDBE_CE),
445 "r" (0x0), "i" (ASI_UDB_ERROR_W));
447 if (udbl & UDBE_CE) {
448 __asm__ __volatile__(
449 "stxa %0, [%1] %2\n\t"
452 : "r" (udbl & UDBE_CE),
453 "r" (0x18), "i" (ASI_UDB_ERROR_W));
457 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
/* Non-zero when the P-cache has been force-enabled via this routine. */
461 int cheetah_pcache_forced_on;
/* Turn on the Cheetah prefetch cache: read the DCU control register,
 * set the P-cache enable bits, and write it back.
 * NOTE(review): fragment — parts of the asm statements elided.
 */
463 void cheetah_enable_pcache(void)
467 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
470 __asm__ __volatile__("ldxa [%%g0] %1, %0"
472 : "i" (ASI_DCU_CONTROL_REG));
473 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
474 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
477 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
480 /* Cheetah error trap handling. */
/* Displacement-flush area parameters, computed once at boot by
 * cheetah_ecache_flush_init() and used by cheetah_flush_ecache*(). */
481 static unsigned long ecache_flush_physbase;
482 static unsigned long ecache_flush_linesize;
483 static unsigned long ecache_flush_size;
485 /* WARNING: The error trap handlers in assembly know the precise
486 * layout of the following structure.
488 * C-level handlers below use this information to log the error
489 * and then determine how to recover (if possible).
/* Per-error snapshot filled in by the assembly trap handlers; the
 * /*0xNN*/ annotations are fixed byte offsets the asm depends on.
 * NOTE(review): leading fields (presumably AFSR/AFAR at 0x00/0x08) are
 * elided in this fragment — do not change field order or padding.
 */
491 struct cheetah_err_info {
496 /*0x10*/u64 dcache_data[4]; /* The actual data */
497 /*0x30*/u64 dcache_index; /* D-cache index */
498 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
499 /*0x40*/u64 dcache_utag; /* D-cache microtag */
500 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
503 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
504 /*0x90*/u64 icache_index; /* I-cache index */
505 /*0x98*/u64 icache_tag; /* I-cache phys tag */
506 /*0xa0*/u64 icache_utag; /* I-cache microtag */
507 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
508 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
509 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
512 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
513 /*0xe0*/u64 ecache_index; /* E-cache index */
514 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
/* Pad the record to a power-of-two size for the per-cpu log array. */
516 /*0xf0*/u64 __pad[32 - 30];
/* Sentinel stored in a log slot's AFSR to mark it free for reuse. */
518 #define CHAFSR_INVALID ((u64)-1L)
520 /* This table is ordered in priority of errors and matches the
521 * AFAR overwrite policy as well.
/* (mask, name) pair; tables below are terminated by a zero mask.
 * NOTE(review): field declarations elided in this fragment. */
524 struct afsr_error_table {
/* Human-readable descriptions of base Cheetah AFSR error bits, and the
 * priority-ordered table mapping AFSR bits to those descriptions. */
529 static const char CHAFSR_PERR_msg[] =
530 "System interface protocol error";
531 static const char CHAFSR_IERR_msg[] =
532 "Internal processor error";
533 static const char CHAFSR_ISAP_msg[] =
534 "System request parity error on incoming addresss";
535 static const char CHAFSR_UCU_msg[] =
536 "Uncorrectable E-cache ECC error for ifetch/data";
537 static const char CHAFSR_UCC_msg[] =
538 "SW Correctable E-cache ECC error for ifetch/data";
539 static const char CHAFSR_UE_msg[] =
540 "Uncorrectable system bus data ECC error for read";
541 static const char CHAFSR_EDU_msg[] =
542 "Uncorrectable E-cache ECC error for stmerge/blkld";
543 static const char CHAFSR_EMU_msg[] =
544 "Uncorrectable system bus MTAG error";
545 static const char CHAFSR_WDU_msg[] =
546 "Uncorrectable E-cache ECC error for writeback";
547 static const char CHAFSR_CPU_msg[] =
548 "Uncorrectable ECC error for copyout";
549 static const char CHAFSR_CE_msg[] =
550 "HW corrected system bus data ECC error for read";
551 static const char CHAFSR_EDC_msg[] =
552 "HW corrected E-cache ECC error for stmerge/blkld";
553 static const char CHAFSR_EMC_msg[] =
554 "HW corrected system bus MTAG ECC error";
555 static const char CHAFSR_WDC_msg[] =
556 "HW corrected E-cache ECC error for writeback";
557 static const char CHAFSR_CPC_msg[] =
558 "HW corrected ECC error for copyout";
559 static const char CHAFSR_TO_msg[] =
560 "Unmapped error from system bus";
561 static const char CHAFSR_BERR_msg[] =
562 "Bus error response from system bus";
563 static const char CHAFSR_IVC_msg[] =
564 "HW corrected system bus data ECC error for ivec read";
565 static const char CHAFSR_IVU_msg[] =
566 "Uncorrectable system bus data ECC error for ivec read";
567 static struct afsr_error_table __cheetah_error_table[] = {
568 { CHAFSR_PERR, CHAFSR_PERR_msg },
569 { CHAFSR_IERR, CHAFSR_IERR_msg },
570 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
571 { CHAFSR_UCU, CHAFSR_UCU_msg },
572 { CHAFSR_UCC, CHAFSR_UCC_msg },
573 { CHAFSR_UE, CHAFSR_UE_msg },
574 { CHAFSR_EDU, CHAFSR_EDU_msg },
575 { CHAFSR_EMU, CHAFSR_EMU_msg },
576 { CHAFSR_WDU, CHAFSR_WDU_msg },
577 { CHAFSR_CPU, CHAFSR_CPU_msg },
578 { CHAFSR_CE, CHAFSR_CE_msg },
579 { CHAFSR_EDC, CHAFSR_EDC_msg },
580 { CHAFSR_EMC, CHAFSR_EMC_msg },
581 { CHAFSR_WDC, CHAFSR_WDC_msg },
582 { CHAFSR_CPC, CHAFSR_CPC_msg },
583 { CHAFSR_TO, CHAFSR_TO_msg },
584 { CHAFSR_BERR, CHAFSR_BERR_msg },
585 /* These two do not update the AFAR. */
586 { CHAFSR_IVC, CHAFSR_IVC_msg },
587 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Cheetah+ additions to the AFSR error set, plus its priority-ordered
 * table (base Cheetah entries interleaved with CHPAFSR_* entries). */
590 static const char CHPAFSR_DTO_msg[] =
591 "System bus unmapped error for prefetch/storequeue-read";
592 static const char CHPAFSR_DBERR_msg[] =
593 "System bus error for prefetch/storequeue-read";
594 static const char CHPAFSR_THCE_msg[] =
595 "Hardware corrected E-cache Tag ECC error";
596 static const char CHPAFSR_TSCE_msg[] =
597 "SW handled correctable E-cache Tag ECC error";
598 static const char CHPAFSR_TUE_msg[] =
599 "Uncorrectable E-cache Tag ECC error";
600 static const char CHPAFSR_DUE_msg[] =
601 "System bus uncorrectable data ECC error due to prefetch/store-fill";
602 static struct afsr_error_table __cheetah_plus_error_table[] = {
603 { CHAFSR_PERR, CHAFSR_PERR_msg },
604 { CHAFSR_IERR, CHAFSR_IERR_msg },
605 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
606 { CHAFSR_UCU, CHAFSR_UCU_msg },
607 { CHAFSR_UCC, CHAFSR_UCC_msg },
608 { CHAFSR_UE, CHAFSR_UE_msg },
609 { CHAFSR_EDU, CHAFSR_EDU_msg },
610 { CHAFSR_EMU, CHAFSR_EMU_msg },
611 { CHAFSR_WDU, CHAFSR_WDU_msg },
612 { CHAFSR_CPU, CHAFSR_CPU_msg },
613 { CHAFSR_CE, CHAFSR_CE_msg },
614 { CHAFSR_EDC, CHAFSR_EDC_msg },
615 { CHAFSR_EMC, CHAFSR_EMC_msg },
616 { CHAFSR_WDC, CHAFSR_WDC_msg },
617 { CHAFSR_CPC, CHAFSR_CPC_msg },
618 { CHAFSR_TO, CHAFSR_TO_msg },
619 { CHAFSR_BERR, CHAFSR_BERR_msg },
620 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
621 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
622 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
623 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
624 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
625 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
626 /* These two do not update the AFAR. */
627 { CHAFSR_IVC, CHAFSR_IVC_msg },
628 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Jalapeno (JBUS) AFSR error descriptions and priority-ordered table.
 * NOTE(review): the trailing comment says "These two" but only the IVU
 * entry is visible — the IVC entry appears to be elided in this fragment.
 */
631 static const char JPAFSR_JETO_msg[] =
632 "System interface protocol error, hw timeout caused";
633 static const char JPAFSR_SCE_msg[] =
634 "Parity error on system snoop results";
635 static const char JPAFSR_JEIC_msg[] =
636 "System interface protocol error, illegal command detected";
637 static const char JPAFSR_JEIT_msg[] =
638 "System interface protocol error, illegal ADTYPE detected";
639 static const char JPAFSR_OM_msg[] =
640 "Out of range memory error has occurred";
641 static const char JPAFSR_ETP_msg[] =
642 "Parity error on L2 cache tag SRAM";
643 static const char JPAFSR_UMS_msg[] =
644 "Error due to unsupported store";
645 static const char JPAFSR_RUE_msg[] =
646 "Uncorrectable ECC error from remote cache/memory";
647 static const char JPAFSR_RCE_msg[] =
648 "Correctable ECC error from remote cache/memory";
649 static const char JPAFSR_BP_msg[] =
650 "JBUS parity error on returned read data";
651 static const char JPAFSR_WBP_msg[] =
652 "JBUS parity error on data for writeback or block store";
653 static const char JPAFSR_FRC_msg[] =
654 "Foreign read to DRAM incurring correctable ECC error";
655 static const char JPAFSR_FRU_msg[] =
656 "Foreign read to DRAM incurring uncorrectable ECC error";
657 static struct afsr_error_table __jalapeno_error_table[] = {
658 { JPAFSR_JETO, JPAFSR_JETO_msg },
659 { JPAFSR_SCE, JPAFSR_SCE_msg },
660 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
661 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
662 { CHAFSR_PERR, CHAFSR_PERR_msg },
663 { CHAFSR_IERR, CHAFSR_IERR_msg },
664 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
665 { CHAFSR_UCU, CHAFSR_UCU_msg },
666 { CHAFSR_UCC, CHAFSR_UCC_msg },
667 { CHAFSR_UE, CHAFSR_UE_msg },
668 { CHAFSR_EDU, CHAFSR_EDU_msg },
669 { JPAFSR_OM, JPAFSR_OM_msg },
670 { CHAFSR_WDU, CHAFSR_WDU_msg },
671 { CHAFSR_CPU, CHAFSR_CPU_msg },
672 { CHAFSR_CE, CHAFSR_CE_msg },
673 { CHAFSR_EDC, CHAFSR_EDC_msg },
674 { JPAFSR_ETP, JPAFSR_ETP_msg },
675 { CHAFSR_WDC, CHAFSR_WDC_msg },
676 { CHAFSR_CPC, CHAFSR_CPC_msg },
677 { CHAFSR_TO, CHAFSR_TO_msg },
678 { CHAFSR_BERR, CHAFSR_BERR_msg },
679 { JPAFSR_UMS, JPAFSR_UMS_msg },
680 { JPAFSR_RUE, JPAFSR_RUE_msg },
681 { JPAFSR_RCE, JPAFSR_RCE_msg },
682 { JPAFSR_BP, JPAFSR_BP_msg },
683 { JPAFSR_WBP, JPAFSR_WBP_msg },
684 { JPAFSR_FRC, JPAFSR_FRC_msg },
685 { JPAFSR_FRU, JPAFSR_FRU_msg },
686 /* These two do not update the AFAR. */
687 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Selected at boot (in cheetah_ecache_flush_init) from the CPU's %ver:
 * points at one of the three tables above plus the matching AFSR mask. */
690 static struct afsr_error_table *cheetah_error_table;
691 static unsigned long cheetah_afsr_errors;
693 /* This is allocated at boot time based upon the largest hardware
694 * cpu ID in the system. We allocate two entries per cpu, one for
695 * TL==0 logging and one for TL >= 1 logging.
697 struct cheetah_err_info *cheetah_error_log;
/* Return this CPU's log slot: entry (cpu*2) for TL==0, the next one for
 * TL>=1 (selected by CHAFSR_TL1 in @afsr); NULL-guard when the log is
 * not yet allocated.
 * NOTE(review): fragment — return statements/braces elided.
 */
699 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
701 struct cheetah_err_info *p;
702 int cpu = smp_processor_id();
704 if (!cheetah_error_log)
707 p = cheetah_error_log + (cpu * 2);
708 if ((afsr & CHAFSR_TL1) != 0UL)
/* Trap-table slots (tl0_*/tl1_*) and the replacement handler stubs that
 * cheetah_ecache_flush_init() memcpy()s over them at boot. */
714 extern unsigned int tl0_icpe[], tl1_icpe[];
715 extern unsigned int tl0_dcpe[], tl1_dcpe[];
716 extern unsigned int tl0_fecc[], tl1_fecc[];
717 extern unsigned int tl0_cee[], tl1_cee[];
718 extern unsigned int tl0_iae[], tl1_iae[];
719 extern unsigned int tl0_dae[], tl1_dae[];
720 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
721 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
722 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
723 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
724 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
/* Boot-time setup for Cheetah error handling:
 *  1) probe E-cache size/linesize from the PROM device tree,
 *  2) find a contiguous physical area (2x E-cache) for displacement flush,
 *  3) allocate + invalidate the per-cpu error-log scoreboard,
 *  4) pick the error table by CPU %ver and patch the trap tables.
 * NOTE(review): fragment — declarations, returns and braces elided.
 */
726 void __init cheetah_ecache_flush_init(void)
728 unsigned long largest_size, smallest_linesize, order, ver;
729 int node, i, instance;
731 /* Scan all cpu device tree nodes, note two values:
732 * 1) largest E-cache size
733 * 2) smallest E-cache line size
736 smallest_linesize = ~0UL;
739 while (!cpu_find_by_instance(instance, &node, NULL)) {
742 val = prom_getintdefault(node, "ecache-size",
744 if (val > largest_size)
746 val = prom_getintdefault(node, "ecache-line-size", 64);
747 if (val < smallest_linesize)
748 smallest_linesize = val;
752 if (largest_size == 0UL || smallest_linesize == ~0UL) {
753 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
/* Displacement flush needs twice the E-cache size. */
758 ecache_flush_size = (2 * largest_size);
759 ecache_flush_linesize = smallest_linesize;
761 /* Discover a physically contiguous chunk of physical
762 * memory in 'sp_banks' of size ecache_flush_size calculated
763 * above. Store the physical base of this area at
764 * ecache_flush_physbase.
766 for (node = 0; ; node++) {
767 if (sp_banks[node].num_bytes == 0)
769 if (sp_banks[node].num_bytes >= ecache_flush_size) {
770 ecache_flush_physbase = sp_banks[node].base_addr;
775 /* Note: Zero would be a valid value of ecache_flush_physbase so
776 * don't use that as the success test. :-)
778 if (sp_banks[node].num_bytes == 0) {
779 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
780 "contiguous physical memory.\n", ecache_flush_size);
784 /* Now allocate error trap reporting scoreboard. */
785 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
/* Smallest page order that fits the whole scoreboard. */
786 for (order = 0; order < MAX_ORDER; order++) {
787 if ((PAGE_SIZE << order) >= node)
790 cheetah_error_log = (struct cheetah_err_info *)
791 __get_free_pages(GFP_KERNEL, order);
792 if (!cheetah_error_log) {
793 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
794 "error logging scoreboard (%d bytes).\n", node);
797 memset(cheetah_error_log, 0, PAGE_SIZE << order);
799 /* Mark all AFSRs as invalid so that the trap handler will
800 * log new new information there.
802 for (i = 0; i < 2 * NR_CPUS; i++)
803 cheetah_error_log[i].afsr = CHAFSR_INVALID;
/* Dispatch on the implementation field of %ver: Jalapeno, Cheetah+,
 * or base Cheetah. */
805 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
806 if ((ver >> 32) == 0x003e0016) {
807 cheetah_error_table = &__jalapeno_error_table[0];
808 cheetah_afsr_errors = JPAFSR_ERRORS;
809 } else if ((ver >> 32) == 0x003e0015) {
810 cheetah_error_table = &__cheetah_plus_error_table[0];
811 cheetah_afsr_errors = CHPAFSR_ERRORS;
813 cheetah_error_table = &__cheetah_error_table[0];
814 cheetah_afsr_errors = CHAFSR_ERRORS;
817 /* Now patch trap tables. */
818 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
819 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
820 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
821 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
822 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
823 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
824 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
825 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
/* Cheetah+ additionally gets the I-/D-cache parity error vectors. */
826 if (tlb_type == cheetah_plus) {
827 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
828 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
829 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
830 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
/* Displacement-flush the entire E-cache by reading through the boot-time
 * flush area (2x cache size) one line at a time via ASI_PHYS_USE_EC.
 */
835 static void cheetah_flush_ecache(void)
837 unsigned long flush_base = ecache_flush_physbase;
838 unsigned long flush_linesize = ecache_flush_linesize;
839 unsigned long flush_size = ecache_flush_size;
841 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
842 " bne,pt %%xcc, 1b\n\t"
843 " ldxa [%2 + %0] %3, %%g0\n\t"
845 : "0" (flush_size), "r" (flush_base),
846 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
/* Displacement-flush the single E-cache line holding @physaddr: read the
 * two addresses in the flush area that alias that line (the cache is
 * treated as 2-way here, hence flush_size >> 1).
 * NOTE(review): fragment — local declaration of 'alias' elided.
 */
849 static void cheetah_flush_ecache_line(unsigned long physaddr)
/* Align to the 8-byte access size used below. */
853 physaddr &= ~(8UL - 1UL);
854 physaddr = (ecache_flush_physbase +
855 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
856 alias = physaddr + (ecache_flush_size >> 1UL);
857 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
858 "ldxa [%1] %2, %%g0\n\t"
861 : "r" (physaddr), "r" (alias),
862 "i" (ASI_PHYS_USE_EC));
865 /* Unfortunately, the diagnostic access to the I-cache tags we need to
866 * use to clear the thing interferes with I-cache coherency transactions.
868 * So we must only flush the I-cache when it is disabled.
/* Clear the valid bit of every I-cache tag via diagnostic stores.
 * Caller (cheetah_flush_icache) must have disabled the I-cache first.
 * NOTE(review): fragment — 'addr' declaration and the ASI operand elided.
 */
870 static void __cheetah_flush_icache(void)
872 unsigned int icache_size, icache_line_size;
875 icache_size = local_cpu_data().icache_size;
876 icache_line_size = local_cpu_data().icache_line_size;
878 /* Clear the valid bits in all the tags. */
879 for (addr = 0; addr < icache_size; addr += icache_line_size) {
880 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
/* The (2 << 3) in the address selects the tag diagnostic access. */
883 : "r" (addr | (2 << 3)),
/* Flush the I-cache safely: save the DCU control register, disable the
 * I-cache (set DCU_IC... see note above __cheetah_flush_icache), do the
 * diagnostic flush, then restore the saved DCU value.
 */
888 static void cheetah_flush_icache(void)
890 unsigned long dcu_save;
892 /* Save current DCU, disable I-cache. */
893 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
894 "or %0, %2, %%g1\n\t"
895 "stxa %%g1, [%%g0] %1\n\t"
898 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
901 __cheetah_flush_icache();
903 /* Restore DCU register */
904 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
907 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
/* Invalidate every D-cache tag via diagnostic stores to ASI_DCACHE_TAG.
 * NOTE(review): fragment — 'addr' declaration and closing braces elided.
 */
910 static void cheetah_flush_dcache(void)
912 unsigned int dcache_size, dcache_line_size;
915 dcache_size = local_cpu_data().dcache_size;
916 dcache_line_size = local_cpu_data().dcache_line_size;
918 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
919 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
922 : "r" (addr), "i" (ASI_DCACHE_TAG));
926 /* In order to make the even parity correct we must do two things.
927 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
928 * Next, we clear out all 32-bytes of data for that line. Data of
929 * all-zero + tag parity value of zero == correct parity.
/* Walk every D-cache line: write a microtag derived from the address
 * (addr >> 14), then zero the line's data 8 bytes at a time, leaving the
 * whole cache in a known-good-parity state.
 * NOTE(review): fragment — declarations and some asm operands elided.
 */
931 static void cheetah_plus_zap_dcache_parity(void)
933 unsigned int dcache_size, dcache_line_size;
936 dcache_size = local_cpu_data().dcache_size;
937 dcache_line_size = local_cpu_data().dcache_line_size;
939 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
940 unsigned long tag = (addr >> 14);
943 __asm__ __volatile__("membar #Sync\n\t"
944 "stxa %0, [%1] %2\n\t"
947 : "r" (tag), "r" (addr),
948 "i" (ASI_DCACHE_UTAG));
949 for (line = addr; line < addr + dcache_line_size; line += 8)
950 __asm__ __volatile__("membar #Sync\n\t"
951 "stxa %%g0, [%0] %1\n\t"
955 "i" (ASI_DCACHE_DATA));
959 /* Conversion tables used to frob Cheetah AFSR syndrome values into
960 * something palatable to the memory controller driver get_unumber
/* E-syndrome -> bit-number/code table, indexed by the 9-bit E_SYNDROME
 * field of the AFSR; consumed by cheetah_log_errors() via
 * chmc_getunumber().  NONE/C*/M* symbolic values are defined in the
 * (elided) lines just above this fragment.
 */
984 static unsigned char cheetah_ecc_syntab[] = {
985 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
986 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
987 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
988 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
989 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
990 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
991 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
992 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
993 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
994 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
995 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
996 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
997 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
998 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
999 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1000 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1001 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1002 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1003 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1004 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1005 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1006 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1007 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1008 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1009 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1010 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1011 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1012 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1013 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1014 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1015 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1016 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
/* M-syndrome (MTAG) -> code table; initializer elided in this fragment. */
1018 static unsigned char cheetah_mtag_syntab[] = {
1029 /* Return the highest priority error condition mentioned in 'afsr';
 * the table is already sorted in priority order, so the first hit wins.
 * NOTE(review): fragment — 'i' declaration and return elided.
 */
1030 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1032 unsigned long tmp = 0;
1035 for (i = 0; cheetah_error_table[i].mask; i++) {
1036 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Map a single AFSR error bit to its description from the active table.
 * NOTE(review): fragment — 'i' declaration and the fallback return elided.
 */
1042 static const char *cheetah_get_string(unsigned long bit)
1046 for (i = 0; cheetah_error_table[i].mask; i++) {
1047 if ((bit & cheetah_error_table[i].mask) != 0UL)
1048 return cheetah_error_table[i].name;
/* Memory-controller driver hook: resolve (syndrome, afar) to a module name. */
1053 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Print a full decoded Cheetah error report to the console.
 * Severity is KERN_WARNING when 'recoverable' is non-zero, KERN_CRIT
 * otherwise.  Dumps: AFSR/AFAR and TL1 state, trap PC/NPC/TSTATE,
 * M/E syndromes, the memory module name ("unum") when the syndrome
 * identifies one via chmc_getunumber(), the D-cache, I-cache and
 * E-cache snapshots captured in *info, and finally every remaining
 * error bit beyond the highest-priority one.
 */
1055 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1056 unsigned long afsr, unsigned long afar, int recoverable)
1058 unsigned long hipri;
1061 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1062 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1064 (afsr & CHAFSR_TL1) ? 1 : 0);
1065 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1066 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1067 regs->tpc, regs->tnpc, regs->tstate);
1068 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1069 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1070 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1071 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1072 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1073 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1074 hipri = cheetah_get_hipri(afsr);
1075 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1076 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1077 hipri, cheetah_get_string(hipri));
1079 /* Try to get unumber if relevant. */
/* Error classes that carry an E-cache (data) syndrome vs. an M-tag syndrome. */
1080 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1081 CHAFSR_CPC | CHAFSR_CPU | \
1082 CHAFSR_UE | CHAFSR_CE | \
1083 CHAFSR_EDC | CHAFSR_EDU | \
1084 CHAFSR_UCC | CHAFSR_UCU | \
1085 CHAFSR_WDU | CHAFSR_WDC)
1086 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1087 if (afsr & ESYND_ERRORS) {
/* Translate raw syndrome via cheetah_ecc_syntab, then ask the memory
 * controller driver for the DIMM unum string.
 * NOTE(review): the declarations of 'syndrome', 'ret' and 'unum' are
 * missing from this excerpt.
 */
1091 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1092 syndrome = cheetah_ecc_syntab[syndrome];
1093 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1095 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1096 (recoverable ? KERN_WARNING : KERN_CRIT),
1097 smp_processor_id(), unum);
1098 } else if (afsr & MSYND_ERRORS) {
1102 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1103 syndrome = cheetah_mtag_syntab[syndrome];
1104 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1106 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1107 (recoverable ? KERN_WARNING : KERN_CRIT),
1108 smp_processor_id(), unum);
1111 /* Now dump the cache snapshots. */
1112 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1113 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1114 (int) info->dcache_index,
1118 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1119 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1120 info->dcache_data[0],
1121 info->dcache_data[1],
1122 info->dcache_data[2],
1123 info->dcache_data[3]);
1124 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1125 "u[%016lx] l[%016lx]\n",
1126 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1127 (int) info->icache_index,
1132 info->icache_lower);
1133 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1134 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1135 info->icache_data[0],
1136 info->icache_data[1],
1137 info->icache_data[2],
1138 info->icache_data[3]);
1139 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1140 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1141 info->icache_data[4],
1142 info->icache_data[5],
1143 info->icache_data[6],
1144 info->icache_data[7]);
1145 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1146 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1147 (int) info->ecache_index, info->ecache_tag);
1148 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1149 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1150 info->ecache_data[0],
1151 info->ecache_data[1],
1152 info->ecache_data[2],
1153 info->ecache_data[3]);
/* Report every other latched error bit, highest priority first. */
1155 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1156 while (afsr != 0UL) {
1157 unsigned long bit = cheetah_get_hipri(afsr);
1159 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1160 (recoverable ? KERN_WARNING : KERN_CRIT),
1161 bit, cheetah_get_string(bit));
1167 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read AFSR/AFAR straight from the hardware to catch errors that were
 * latched while error-reporting traps were disabled.  When new error bits
 * are present they are recorded (into *logp when non-NULL, judging from the
 * parameter) and acknowledged by writing AFSR back.
 * NOTE(review): the full asm constraint lists and the return statements are
 * missing from this excerpt — presumably returns non-zero when new errors
 * were found; confirm against the complete source.
 */
1170 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1172 unsigned long afsr, afar;
1175 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1178 if ((afsr & cheetah_afsr_errors) != 0) {
1180 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
/* Write the observed bits back to AFSR to clear them (W1C semantics). */
1188 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1190 : : "r" (afsr), "i" (ASI_AFSR));
/* Handle a Fast-ECC error trap (trap table vectors here after flushing
 * the E-cache).  Snapshot the per-cpu error log, re-enable the caches
 * and error reporting, decide recoverability from AFSR, re-check for
 * errors latched while reporting was off, log everything, and panic if
 * the state is not recoverable.
 */
1195 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1197 struct cheetah_err_info local_snapshot, *p;
/* Flush E-cache first so the error condition cannot re-trigger while
 * we are working.
 */
1201 cheetah_flush_ecache();
1203 p = cheetah_get_error_log(afsr);
/* No log slot for this AFSR: fatal, report via PROM console. */
1205 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1207 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1208 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1212 /* Grab snapshot of logged error. */
1213 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1215 /* If the current trap snapshot does not match what the
1216 * trap handler passed along into our args, big trouble.
1217 * In such a case, mark the local copy as invalid.
1219 * Else, it matches and we mark the afsr in the non-local
1220 * copy as invalid so we may log new error traps there.
1222 if (p->afsr != afsr || p->afar != afar)
1223 local_snapshot.afsr = CHAFSR_INVALID;
1225 p->afsr = CHAFSR_INVALID;
1227 cheetah_flush_icache();
1228 cheetah_flush_dcache();
1230 /* Re-enable I-cache/D-cache */
1231 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1232 "or %%g1, %1, %%g1\n\t"
1233 "stxa %%g1, [%%g0] %0\n\t"
1236 : "i" (ASI_DCU_CONTROL_REG),
1237 "i" (DCU_DC | DCU_IC)
1240 /* Re-enable error reporting */
1241 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1242 "or %%g1, %1, %%g1\n\t"
1243 "stxa %%g1, [%%g0] %0\n\t"
1246 : "i" (ASI_ESTATE_ERROR_EN),
1247 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1250 /* Decide if we can continue after handling this trap and
1251 * logging the error.
/* PERR/IERR/ISAP mean processor state is corrupt: never recoverable. */
1254 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1257 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1258 * error was logged while we had error reporting traps disabled.
1260 if (cheetah_recheck_errors(&local_snapshot)) {
1261 unsigned long new_afsr = local_snapshot.afsr;
1263 /* If we got a new asynchronous error, die... */
1264 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1265 CHAFSR_WDU | CHAFSR_CPU |
1266 CHAFSR_IVU | CHAFSR_UE |
1267 CHAFSR_BERR | CHAFSR_TO))
1272 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1275 panic("Irrecoverable Fast-ECC error trap.\n");
1277 /* Flush E-cache to kick the error trap handlers out. */
1278 cheetah_flush_ecache();
1281 /* Try to fix a correctable error by pushing the line out from
1282 * the E-cache. Recheck error reporting registers to see if the
1283 * problem is intermittent.
/* Returns a status code distinguishing "fixed", "persistent" and
 * "intermittent" — NOTE(review): the actual return statements are missing
 * from this excerpt; confirm the return-value convention against the full
 * source.
 */
1285 static int cheetah_fix_ce(unsigned long physaddr)
1287 unsigned long orig_estate;
1288 unsigned long alias1, alias2;
1291 /* Make sure correctable error traps are disabled. */
1292 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1293 "andn %0, %1, %%g1\n\t"
1294 "stxa %%g1, [%%g0] %2\n\t"
1296 : "=&r" (orig_estate)
1297 : "i" (ESTATE_ERROR_CEEN),
1298 "i" (ASI_ESTATE_ERROR_EN)
1301 /* We calculate alias addresses that will force the
1302 * cache line in question out of the E-cache. Then
1303 * we bring it back in with an atomic instruction so
1304 * that we get it in some modified/exclusive state,
1305 * then we displace it again to try and get proper ECC
1306 * pushed back into the system.
/* Align to an 8-byte boundary before computing the aliases. */
1308 physaddr &= ~(8UL - 1UL);
1309 alias1 = (ecache_flush_physbase +
1310 (physaddr & ((ecache_flush_size >> 1) - 1)));
1311 alias2 = alias1 + (ecache_flush_size >> 1);
1312 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1313 "ldxa [%1] %3, %%g0\n\t"
1314 "casxa [%2] %3, %%g0, %%g0\n\t"
1315 "membar #StoreLoad | #StoreStore\n\t"
1316 "ldxa [%0] %3, %%g0\n\t"
1317 "ldxa [%1] %3, %%g0\n\t"
1320 : "r" (alias1), "r" (alias2),
1321 "r" (physaddr), "i" (ASI_PHYS_USE_EC))
1323 /* Did that trigger another error? */
1324 if (cheetah_recheck_errors(NULL)) {
1325 /* Try one more time. */
1326 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1328 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1329 if (cheetah_recheck_errors(NULL))
1334 /* No new error, intermittent problem. */
1338 /* Restore error enables. */
1339 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1341 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1346 /* Return non-zero if PADDR is a valid physical memory address. */
/* Walks the sp_banks[] table (terminated by a zero-length bank) and
 * checks whether paddr falls inside any bank.  NOTE(review): the return
 * statements are missing from this excerpt.
 */
1347 static int cheetah_check_main_memory(unsigned long paddr)
1351 for (i = 0; ; i++) {
1352 if (sp_banks[i].num_bytes == 0)
1354 if (paddr >= sp_banks[i].base_addr &&
1355 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
/* Handle a Correctable-ECC error trap.  Snapshot the per-cpu error log,
 * attempt to scrub a main-memory CE by displacing the line through the
 * E-cache (cheetah_fix_ce), flush whatever cache scope the error class
 * requires, re-enable the I-cache and CE reporting, then log and, if the
 * AFSR shows corrupted processor state, panic.
 */
1361 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1363 struct cheetah_err_info local_snapshot, *p;
1364 int recoverable, is_memory;
1366 p = cheetah_get_error_log(afsr);
/* No log slot for this AFSR: fatal, report via PROM console. */
1368 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1370 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1371 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1375 /* Grab snapshot of logged error. */
1376 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1378 /* If the current trap snapshot does not match what the
1379 * trap handler passed along into our args, big trouble.
1380 * In such a case, mark the local copy as invalid.
1382 * Else, it matches and we mark the afsr in the non-local
1383 * copy as invalid so we may log new error traps there.
1385 if (p->afsr != afsr || p->afar != afar)
1386 local_snapshot.afsr = CHAFSR_INVALID;
1388 p->afsr = CHAFSR_INVALID;
1390 is_memory = cheetah_check_main_memory(afar);
1392 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1393 /* XXX Might want to log the results of this operation
1394 * XXX somewhere... -DaveM
1396 cheetah_fix_ce(afar);
/* Choose flush scope: flush everything unless this error class is the
 * only one latched, in which case one line suffices.
 */
1400 int flush_all, flush_line;
1402 flush_all = flush_line = 0;
1403 if ((afsr & CHAFSR_EDC) != 0UL) {
1404 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1408 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1409 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1415 /* Trap handler only disabled I-cache, flush it. */
1416 cheetah_flush_icache();
1418 /* Re-enable I-cache */
1419 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1420 "or %%g1, %1, %%g1\n\t"
1421 "stxa %%g1, [%%g0] %0\n\t"
1424 : "i" (ASI_DCU_CONTROL_REG),
1429 cheetah_flush_ecache();
1430 else if (flush_line)
1431 cheetah_flush_ecache_line(afar);
1434 /* Re-enable error reporting */
1435 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1436 "or %%g1, %1, %%g1\n\t"
1437 "stxa %%g1, [%%g0] %0\n\t"
1440 : "i" (ASI_ESTATE_ERROR_EN),
1441 "i" (ESTATE_ERROR_CEEN)
1444 /* Decide if we can continue after handling this trap and
1445 * logging the error.
/* PERR/IERR/ISAP mean processor state is corrupt: never recoverable. */
1448 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1451 /* Re-check AFSR/AFAR */
1452 (void) cheetah_recheck_errors(&local_snapshot);
1455 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1458 panic("Irrecoverable Correctable-ECC error trap.\n");
/* Handle a deferred (asynchronous) error trap: UE, EDU, BERR, TO, etc.
 * Special-cases the PCI config-space "poke" probing sequence, otherwise
 * snapshots the error log, restores caches and error reporting, decides
 * recoverability, and — for recoverable main-memory errors — pins the
 * bad page and fixes up the faulting kernel access via the exception
 * tables.  Panics when recovery is impossible.
 */
1461 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1463 struct cheetah_err_info local_snapshot, *p;
1464 int recoverable, is_memory;
1467 /* Check for the special PCI poke sequence. */
1468 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1469 cheetah_flush_icache();
1470 cheetah_flush_dcache();
1472 /* Re-enable I-cache/D-cache */
1473 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1474 "or %%g1, %1, %%g1\n\t"
1475 "stxa %%g1, [%%g0] %0\n\t"
1478 : "i" (ASI_DCU_CONTROL_REG),
1479 "i" (DCU_DC | DCU_IC)
1482 /* Re-enable error reporting */
1483 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1484 "or %%g1, %1, %%g1\n\t"
1485 "stxa %%g1, [%%g0] %0\n\t"
1488 : "i" (ASI_ESTATE_ERROR_EN),
1489 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1492 (void) cheetah_recheck_errors(NULL);
/* Flag the fault for the PCI poke code and skip the faulting insn. */
1494 pci_poke_faulted = 1;
1496 regs->tnpc = regs->tpc + 4;
1501 p = cheetah_get_error_log(afsr);
/* No log slot for this AFSR: fatal, report via PROM console. */
1503 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1505 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1506 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1510 /* Grab snapshot of logged error. */
1511 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1513 /* If the current trap snapshot does not match what the
1514 * trap handler passed along into our args, big trouble.
1515 * In such a case, mark the local copy as invalid.
1517 * Else, it matches and we mark the afsr in the non-local
1518 * copy as invalid so we may log new error traps there.
1520 if (p->afsr != afsr || p->afar != afar)
1521 local_snapshot.afsr = CHAFSR_INVALID;
1523 p->afsr = CHAFSR_INVALID;
1525 is_memory = cheetah_check_main_memory(afar);
/* Choose flush scope: flush everything unless this error class is the
 * only one latched, in which case one line suffices.
 */
1528 int flush_all, flush_line;
1530 flush_all = flush_line = 0;
1531 if ((afsr & CHAFSR_EDU) != 0UL) {
1532 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1536 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1537 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1543 cheetah_flush_icache();
1544 cheetah_flush_dcache();
1546 /* Re-enable I/D caches */
1547 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1548 "or %%g1, %1, %%g1\n\t"
1549 "stxa %%g1, [%%g0] %0\n\t"
1552 : "i" (ASI_DCU_CONTROL_REG),
1553 "i" (DCU_IC | DCU_DC)
1557 cheetah_flush_ecache();
1558 else if (flush_line)
1559 cheetah_flush_ecache_line(afar);
1562 /* Re-enable error reporting */
1563 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1564 "or %%g1, %1, %%g1\n\t"
1565 "stxa %%g1, [%%g0] %0\n\t"
1568 : "i" (ASI_ESTATE_ERROR_EN),
1569 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1572 /* Decide if we can continue after handling this trap and
1573 * logging the error.
/* PERR/IERR/ISAP mean processor state is corrupt: never recoverable. */
1576 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1579 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1580 * error was logged while we had error reporting traps disabled.
1582 if (cheetah_recheck_errors(&local_snapshot)) {
1583 unsigned long new_afsr = local_snapshot.afsr;
1585 /* If we got a new asynchronous error, die... */
1586 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1587 CHAFSR_WDU | CHAFSR_CPU |
1588 CHAFSR_IVU | CHAFSR_UE |
1589 CHAFSR_BERR | CHAFSR_TO))
1594 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1596 /* "Recoverable" here means we try to yank the page from ever
1597 * being newly used again. This depends upon a few things:
1598 * 1) Must be main memory, and AFAR must be valid.
1599 * 2) If we trapped from user, OK.
1600 * 3) Else, if we trapped from kernel we must find exception
1601 * table entry (ie. we have to have been accessing user
1604 * If AFAR is not in main memory, or we trapped from kernel
1605 * and cannot find an exception table entry, it is unacceptable
1606 * to try and continue.
1608 if (recoverable && is_memory) {
1609 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1610 /* OK, usermode access. */
1613 unsigned long g2 = regs->u_regs[UREG_G2];
1614 unsigned long fixup = search_extables_range(regs->tpc, &g2);
1617 /* OK, kernel access to userspace. */
1621 /* BAD, privileged state is corrupted. */
/* Pin the bad page so it is never handed out again. */
1626 if (pfn_valid(afar >> PAGE_SHIFT))
1627 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1631 /* Only perform fixup if we still have a
1632 * recoverable condition.
1636 regs->tnpc = regs->tpc + 4;
1637 regs->u_regs[UREG_G2] = g2;
1646 panic("Irrecoverable deferred error trap.\n");
1649 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1651 * Bit0: 0=dcache,1=icache
1652 * Bit1: 0=recoverable,1=unrecoverable
1654 * The hardware has disabled both the I-cache and D-cache in
1655 * the %dcr register.
/* Flushes/zaps the affected cache, re-enables both caches, then either
 * panics (unrecoverable) or logs a warning.
 */
1657 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1660 __cheetah_flush_icache();
1662 cheetah_plus_zap_dcache_parity();
1663 cheetah_flush_dcache();
1665 /* Re-enable I-cache/D-cache */
1666 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1667 "or %%g1, %1, %%g1\n\t"
1668 "stxa %%g1, [%%g0] %0\n\t"
1671 : "i" (ASI_DCU_CONTROL_REG),
1672 "i" (DCU_DC | DCU_IC)
/* Bit1 set: unrecoverable — report and die. */
1676 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1678 (type & 0x1) ? 'I' : 'D',
1680 panic("Irrecoverable Cheetah+ parity error.");
1683 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1685 (type & 0x1) ? 'I' : 'D',
/* Common tail for FPU exception traps.  Kernel-mode traps just skip the
 * faulting instruction; user-mode traps deliver SIGFPE with si_code
 * decoded from the current-exception bits of %fsr.
 */
1689 void do_fpe_common(struct pt_regs *regs)
1691 if (regs->tstate & TSTATE_PRIV) {
1692 regs->tpc = regs->tnpc;
1695 unsigned long fsr = current_thread_info()->xfsr[0];
1698 if (test_thread_flag(TIF_32BIT)) {
1699 regs->tpc &= 0xffffffff;
1700 regs->tnpc &= 0xffffffff;
1702 info.si_signo = SIGFPE;
1704 info.si_addr = (void __user *)regs->tpc;
1706 info.si_code = __SI_FAULT;
/* fsr bits 16:14 == 1 selects IEEE_754_exception; then cexc bits
 * (0x1f) pick the specific cause, highest severity first.
 */
1707 if ((fsr & 0x1c000) == (1 << 14)) {
1709 info.si_code = FPE_FLTINV;
1710 else if (fsr & 0x08)
1711 info.si_code = FPE_FLTOVF;
1712 else if (fsr & 0x04)
1713 info.si_code = FPE_FLTUND;
1714 else if (fsr & 0x02)
1715 info.si_code = FPE_FLTDIV;
1716 else if (fsr & 0x01)
1717 info.si_code = FPE_FLTRES;
1719 force_sig_info(SIGFPE, &info, current);
/* fp_exception_ieee_754 trap (TT 0x24): notify debuggers, then fall
 * through to the common SIGFPE delivery path.
 */
1723 void do_fpieee(struct pt_regs *regs)
1725 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1726 0, 0x24, SIGFPE) == NOTIFY_STOP)
1729 do_fpe_common(regs);
1732 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* fp_exception_other trap (TT 0x25): for unfinished/unimplemented FPops
 * try software emulation via do_mathemu(); otherwise (or when emulation
 * fails — the check on 'ret' is outside this excerpt) deliver SIGFPE
 * through do_fpe_common().
 */
1734 void do_fpother(struct pt_regs *regs)
1736 struct fpustate *f = FPUSTATE;
1739 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1740 0, 0x25, SIGFPE) == NOTIFY_STOP)
/* %fsr ftt field (bits 16:14) selects the trap sub-type. */
1743 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1744 case (2 << 14): /* unfinished_FPop */
1745 case (3 << 14): /* unimplemented_FPop */
1746 ret = do_mathemu(regs, f);
1751 do_fpe_common(regs);
/* Tagged-arithmetic overflow trap (TT 0x26): fatal in kernel mode,
 * SIGEMT/EMT_TAGOVF to userspace.
 */
1754 void do_tof(struct pt_regs *regs)
1758 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1759 0, 0x26, SIGEMT) == NOTIFY_STOP)
1762 if (regs->tstate & TSTATE_PRIV)
1763 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1764 if (test_thread_flag(TIF_32BIT)) {
1765 regs->tpc &= 0xffffffff;
1766 regs->tnpc &= 0xffffffff;
1768 info.si_signo = SIGEMT;
1770 info.si_code = EMT_TAGOVF;
1771 info.si_addr = (void __user *)regs->tpc;
1773 force_sig_info(SIGEMT, &info, current);
/* Integer divide-by-zero trap (TT 0x28): fatal in kernel mode,
 * SIGFPE/FPE_INTDIV to userspace.
 */
1776 void do_div0(struct pt_regs *regs)
1780 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1781 0, 0x28, SIGFPE) == NOTIFY_STOP)
1784 if (regs->tstate & TSTATE_PRIV)
1785 die_if_kernel("TL0: Kernel divide by zero.", regs);
1786 if (test_thread_flag(TIF_32BIT)) {
1787 regs->tpc &= 0xffffffff;
1788 regs->tnpc &= 0xffffffff;
1790 info.si_signo = SIGFPE;
1792 info.si_code = FPE_INTDIV;
1793 info.si_addr = (void __user *)regs->tpc;
1795 force_sig_info(SIGFPE, &info, current);
/* Dump 9 kernel instructions around 'pc' (3 before, 6 after), marking the
 * faulting one with angle brackets.  Bails out on a misaligned pc.
 */
1798 void instruction_dump (unsigned int *pc)
1802 if ((((unsigned long) pc) & 3))
1805 printk("Instruction DUMP:");
1806 for (i = -3; i < 6; i++)
1807 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* Like instruction_dump() but for a userspace pc: copies 9 words through
 * copy_from_user() into a local buffer first (bailing on fault or
 * misalignment), and brackets buf[3] — the faulting instruction.
 */
1811 static void user_instruction_dump (unsigned int __user *pc)
1814 unsigned int buf[9];
1816 if ((((unsigned long) pc) & 3))
1819 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1822 printk("Instruction DUMP:");
1823 for (i = 0; i < 9; i++)
1824 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Print a call trace for 'tsk' by walking saved register windows up the
 * kernel stack from _ksp, bounded by the task's thread_info region and
 * capped at 16 frames.  With CONFIG_KALLSYMS each PC is symbolized.
 */
1828 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1830 unsigned long pc, fp, thread_base, ksp;
1831 struct thread_info *tp = tsk->thread_info;
1832 struct reg_window *rw;
1835 ksp = (unsigned long) _ksp;
/* For the current task, re-read %sp here (code outside this excerpt). */
1837 if (tp == current_thread_info())
/* Frame pointers on sparc64 are biased by STACK_BIAS (2047). */
1840 fp = ksp + STACK_BIAS;
1841 thread_base = (unsigned long) tp;
1843 printk("Call Trace:");
1844 #ifdef CONFIG_KALLSYMS
1848 /* Bogus frame pointer? */
1849 if (fp < (thread_base + sizeof(struct thread_info)) ||
1850 fp >= (thread_base + THREAD_SIZE))
1852 rw = (struct reg_window *)fp;
1854 printk(" [%016lx] ", pc);
1855 print_symbol("%s\n", pc);
/* %i6 of the saved window is the caller's frame pointer. */
1856 fp = rw->ins[6] + STACK_BIAS;
1857 } while (++count < 16);
1858 #ifndef CONFIG_KALLSYMS
/* Dump the current task's stack: grab %fp inline and hand it to
 * show_stack().  Exported for use by modules.
 */
1863 void dump_stack(void)
1867 __asm__ __volatile__("mov %%fp, %0"
1869 show_stack(current, ksp);
1872 EXPORT_SYMBOL(dump_stack);
/* Return whether register-window address 'rw' lies on a valid kernel
 * stack for 'task': either above PAGE_OFFSET (with init_task allowed
 * below it) and inside the task's thread_union.
 * NOTE(review): the actual return statements fall outside this excerpt.
 */
1874 static inline int is_kernel_stack(struct task_struct *task,
1875 struct reg_window *rw)
1877 unsigned long rw_addr = (unsigned long) rw;
1878 unsigned long thread_base, thread_end;
1880 if (rw_addr < PAGE_OFFSET) {
1881 if (task != &init_task)
1885 thread_base = (unsigned long) task->thread_info;
1886 thread_end = thread_base + sizeof(union thread_union);
1887 if (rw_addr >= thread_base &&
1888 rw_addr < thread_end &&
/* Follow a saved register window to its caller's window: %i6 holds the
 * caller's (biased) frame pointer, so add STACK_BIAS to get the address.
 */
1895 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1897 unsigned long fp = rw->ins[6];
1902 return (struct reg_window *) (fp + STACK_BIAS);
/* Oops path: print a banner with an incrementing die counter, notify the
 * die chain, flush register windows, back-trace the kernel stack (or dump
 * userspace instructions for a user trap), and finally kill or exit.
 * Despite the name, it is also reached for user-mode faults (the final
 * TSTATE_PRIV check, partly outside this excerpt, decides the outcome).
 */
1905 void die_if_kernel(char *str, struct pt_regs *regs)
1907 static int die_counter;
1908 extern void __show_regs(struct pt_regs * regs);
1909 extern void smp_report_regs(void);
1912 /* Amuse the user. */
1915 " \"@'/ .. \\`@\"\n"
1919 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
1920 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
/* Spill all register windows to the stack so the back-trace sees them. */
1921 __asm__ __volatile__("flushw");
1923 if (regs->tstate & TSTATE_PRIV) {
1924 struct reg_window *rw = (struct reg_window *)
1925 (regs->u_regs[UREG_FP] + STACK_BIAS);
1927 /* Stop the back trace when we hit userland or we
1928 * find some badly aligned kernel stack.
1932 is_kernel_stack(current, rw)) {
1933 printk("Caller[%016lx]", rw->ins[7]);
1934 print_symbol(": %s", rw->ins[7]);
1937 rw = kernel_stack_up(rw);
1939 instruction_dump ((unsigned int *) regs->tpc);
1941 if (test_thread_flag(TIF_32BIT)) {
1942 regs->tpc &= 0xffffffff;
1943 regs->tnpc &= 0xffffffff;
1945 user_instruction_dump ((unsigned int __user *) regs->tpc);
1951 if (regs->tstate & TSTATE_PRIV)
1956 extern int handle_popc(u32 insn, struct pt_regs *regs);
1957 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
/* Illegal-instruction trap (TT 0x10): fatal in kernel mode; for
 * userspace, first try to emulate POPC and LDQF/STQF instructions
 * (not implemented by all CPUs), otherwise deliver SIGILL/ILL_ILLOPC.
 */
1959 void do_illegal_instruction(struct pt_regs *regs)
1961 unsigned long pc = regs->tpc;
1962 unsigned long tstate = regs->tstate;
1966 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1967 0, 0x10, SIGILL) == NOTIFY_STOP)
1970 if (tstate & TSTATE_PRIV)
1971 die_if_kernel("Kernel illegal instruction", regs);
1972 if (test_thread_flag(TIF_32BIT))
1974 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1975 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1976 if (handle_popc(insn, regs))
1978 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1979 if (handle_ldf_stq(insn, regs))
1983 info.si_signo = SIGILL;
1985 info.si_code = ILL_ILLOPC;
1986 info.si_addr = (void __user *)pc;
1988 force_sig_info(SIGILL, &info, current);
/* Misaligned memory access trap (TT 0x34): kernel-mode accesses are
 * handled (emulated/fixed up) by kernel_unaligned_trap(); user-mode
 * accesses get SIGBUS/BUS_ADRALN with the fault address from SFAR.
 */
1991 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1995 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1996 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1999 if (regs->tstate & TSTATE_PRIV) {
2000 extern void kernel_unaligned_trap(struct pt_regs *regs,
2003 unsigned long sfsr);
2005 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
2009 info.si_signo = SIGBUS;
2011 info.si_code = BUS_ADRALN;
2012 info.si_addr = (void __user *)sfar;
2014 force_sig_info(SIGBUS, &info, current);
/* Privileged-opcode trap (TT 0x11) from userspace: deliver
 * SIGILL/ILL_PRVOPC at the faulting pc.
 */
2017 void do_privop(struct pt_regs *regs)
2021 if (notify_die(DIE_TRAP, "privileged operation", regs,
2022 0, 0x11, SIGILL) == NOTIFY_STOP)
2025 if (test_thread_flag(TIF_32BIT)) {
2026 regs->tpc &= 0xffffffff;
2027 regs->tnpc &= 0xffffffff;
2029 info.si_signo = SIGILL;
2031 info.si_code = ILL_PRVOPC;
2032 info.si_addr = (void __user *)regs->tpc;
2034 force_sig_info(SIGILL, &info, current);
/* do_privact and the catch-all handlers below are stubs for traps that
 * should never legitimately occur: each TL1 variant dumps the trap-level
 * log saved just past pt_regs (see dump_tl1_traplog) and then oopses via
 * die_if_kernel with a message naming the trap.
 */
2037 void do_privact(struct pt_regs *regs)
2042 /* Trap level 1 stuff or other traps we should never see... */
2043 void do_cee(struct pt_regs *regs)
2045 die_if_kernel("TL0: Cache Error Exception", regs);
2048 void do_cee_tl1(struct pt_regs *regs)
2050 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2051 die_if_kernel("TL1: Cache Error Exception", regs);
2054 void do_dae_tl1(struct pt_regs *regs)
2056 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2057 die_if_kernel("TL1: Data Access Exception", regs);
2060 void do_iae_tl1(struct pt_regs *regs)
2062 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2063 die_if_kernel("TL1: Instruction Access Exception", regs);
2066 void do_div0_tl1(struct pt_regs *regs)
2068 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2069 die_if_kernel("TL1: DIV0 Exception", regs);
2072 void do_fpdis_tl1(struct pt_regs *regs)
2074 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2075 die_if_kernel("TL1: FPU Disabled", regs);
2078 void do_fpieee_tl1(struct pt_regs *regs)
2080 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2081 die_if_kernel("TL1: FPU IEEE Exception", regs);
2084 void do_fpother_tl1(struct pt_regs *regs)
2086 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2087 die_if_kernel("TL1: FPU Other Exception", regs);
2090 void do_ill_tl1(struct pt_regs *regs)
2092 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2093 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2096 void do_irq_tl1(struct pt_regs *regs)
2098 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2099 die_if_kernel("TL1: IRQ Exception", regs);
2102 void do_lddfmna_tl1(struct pt_regs *regs)
2104 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2105 die_if_kernel("TL1: LDDF Exception", regs);
2108 void do_stdfmna_tl1(struct pt_regs *regs)
2110 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2111 die_if_kernel("TL1: STDF Exception", regs);
2114 void do_paw(struct pt_regs *regs)
2116 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2119 void do_paw_tl1(struct pt_regs *regs)
2121 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2122 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2125 void do_vaw(struct pt_regs *regs)
2127 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2130 void do_vaw_tl1(struct pt_regs *regs)
2132 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2133 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2136 void do_tof_tl1(struct pt_regs *regs)
2138 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2139 die_if_kernel("TL1: Tag Overflow Exception", regs);
/* SunOS/SVR4 compat trap: return a v8-style %psr synthesized from
 * TSTATE in %o0, then advance past the trap instruction.
 */
2142 void do_getpsr(struct pt_regs *regs)
2144 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2145 regs->tpc = regs->tnpc;
2147 if (test_thread_flag(TIF_32BIT)) {
2148 regs->tpc &= 0xffffffff;
2149 regs->tnpc &= 0xffffffff;
2153 extern void thread_info_offsets_are_bolixed_dave(void);
2155 /* Only invoked on boot processor. */
2156 void __init trap_init(void)
2158 /* Compile time sanity check. */
2159 if (TI_TASK != offsetof(struct thread_info, task) ||
2160 TI_FLAGS != offsetof(struct thread_info, flags) ||
2161 TI_CPU != offsetof(struct thread_info, cpu) ||
2162 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2163 TI_KSP != offsetof(struct thread_info, ksp) ||
2164 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2165 TI_KREGS != offsetof(struct thread_info, kregs) ||
2166 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2167 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2168 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2169 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2170 TI_GSR != offsetof(struct thread_info, gsr) ||
2171 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2172 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2173 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2174 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2175 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2176 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2177 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2178 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2179 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2180 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2181 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2182 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2183 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2184 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2185 (TI_FPREGS & (64 - 1)))
2186 thread_info_offsets_are_bolixed_dave();
2188 /* Attach to the address space of init_task. On SMP we
2189 * do this in smp.c:smp_callin for other cpus.
2191 atomic_inc(&init_mm.mm_count);
2192 current->active_mm = &init_mm;