/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */
int num_segmaps, num_contexts;
int invalid_segment;
/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; sp_banks[i].num_bytes; i++)
                total += sp_banks[i].num_bytes;

        return total;
}

extern void sun4c_complete_all_stores(void);
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
                                unsigned long svaddr, unsigned long aerr,
                                unsigned long avaddr)
{
        sun4c_complete_all_stores();
        printk("FAULT: NMI received\n");
        printk("SREGS: Synchronous Error %08lx\n", serr);
        printk("       Synchronous Vaddr %08lx\n", svaddr);
        printk("      Asynchronous Error %08lx\n", aerr);
        printk("      Asynchronous Vaddr %08lx\n", avaddr);
        if (sun4c_memerr_reg)
                printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
        printk("REGISTER DUMP:\n");
        show_regs(regs);
        prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
                struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
                struct pt_regs *regs)
{
        if((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                    "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %08lx\n", address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
                (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
                (tsk->mm ? (unsigned long) tsk->mm->pgd :
                    (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}
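
/* Called for kernel-mode faults raised inside the user-access helpers;
 * decides, from the exception table entry for ret_pc and the faulting
 * instruction at pc, whether the fixup can recover (returns 1, 2 or 3)
 * or whether this must be reported as an unhandled fault.
 */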
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
                if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof(regs));
        regs.pc = pc;
        regs.npc = pc + 4;
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));
        unhandled_fault(address, current, &regs);

        /* Not reached. */
        return 0;
}
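
/* Rate-limited "segfault at ..." console message for unhandled user
 * signals, gated by show_unhandled_signals above.
 */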
static void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->pc);

        printk(KERN_CONT "\n");
}
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                               unsigned long addr)
{
        siginfo_t info;

        info.si_signo = sig;
        info.si_code = code;
        info.si_errno = 0;
        info.si_addr = (void __user *) addr;
        info.si_trapno = 0;

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, info.si_code,
                                addr, current);

        force_sig_info(sig, &info, current);
}
extern unsigned long safe_compute_effective_address(struct pt_regs *,
                                                    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

        if (regs->psr & PSR_PS) {
                insn = *(unsigned int *) regs->pc;
        } else {
                __get_user(insn, (unsigned int *) regs->pc);
        }

        return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
{
        unsigned long addr = compute_si_addr(regs, text_fault);

        __do_fault_siginfo(code, sig, regs, addr);
}
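
/* Main page fault entry point: 'text_fault' says whether this was an
 * instruction fetch fault, 'write' whether the faulting access was a store.
 */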
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int fault, code;

        if(text_fault)
                address = regs->pc;
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        code = SEGV_MAPERR;
        if (!ARCH_SUN4C && address >= TASK_SIZE)
                goto vmalloc_fault;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        down_read(&mm->mmap_sem);
        /*
         * The kernel referencing a bad kernel pointer can lock up
         * a sun4c machine completely, so we must attempt recovery.
         */
        if(!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if(!vma)
                goto bad_area;
        if(vma->vm_start <= address)
                goto good_area;
        if(!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if(expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        code = SEGV_ACCERR;
        if(write) {
                if(!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR) {
                current->maj_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                current->min_flt++;
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }

        up_read(&mm->mmap_sem);
        return;
        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (from_user) {
                do_fault_siginfo(code, SIGSEGV, regs, text_fault);
                return;
        }
        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
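                /* Kernel-mode fault: look for an exception table fixup
                 * covering regs->pc.  If one exists, redirect execution to
                 * the fixup handler and hand back the (possibly adjusted)
                 * %g2 value that search_extables_range() computed.
                 */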
                fixup = search_extables_range(regs->pc, &g2);
                if (fixup > 10) { /* Values below are reserved for other things */
                        extern const unsigned __memset_start[];
                        extern const unsigned __memset_end[];
                        extern const unsigned __csum_partial_copy_start[];
                        extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                               regs->pc, fixup, g2);
#endif
                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end) ||
                            (regs->pc >= (unsigned long)__csum_partial_copy_start &&
                             regs->pc < (unsigned long)__csum_partial_copy_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault(address, tsk, regs);
        do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;
        return;
vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                *pmd = *pmd_k;
                return;
        }
}
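
/* sun4c variant of the fault handler: try to service the fault directly
 * from the software page tables (reloading the PTE into the sun4c MMU)
 * before falling back to the generic do_sparc_fault() path above.
 */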
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        extern void sun4c_update_mmu_cache(struct vm_area_struct *,
                                           unsigned long, pte_t *);
        extern pte_t *sun4c_pte_offset_kernel(pmd_t *, unsigned long);
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        pgd_t *pgdp;
        pte_t *ptep;

        if (text_fault) {
                address = regs->pc;
        } else if (!write &&
                   !(regs->psr & PSR_PS)) {
                unsigned int insn, __user *ip;

                ip = (unsigned int __user *)regs->pc;
                if (!get_user(insn, ip)) {
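                        /* Treat atomic read-modify-write instructions
                         * (swap/ldstub and their alternate-space forms)
                         * as writes, even though the trap reported a read.
                         */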
                        if ((insn & 0xc1680000) == 0xc0680000)
                                write = 1;
                }
        }

        if (!mm) {
                /* We are oopsing. */
                do_sparc_fault(regs, text_fault, write, address);
                BUG();  /* P3 Oops already, you bitch */
        }
        pgdp = pgd_offset(mm, address);
        ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

        if (pgd_val(*pgdp)) {
            if (write) {
                if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
                                   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
                        unsigned long flags;

                        *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                      _SUN4C_PAGE_MODIFIED |
                                      _SUN4C_PAGE_VALID |
                                      _SUN4C_PAGE_DIRTY);

                        local_irq_save(flags);
                        if (sun4c_get_segmap(address) != invalid_segment) {
                                sun4c_put_pte(address, pte_val(*ptep));
                                local_irq_restore(flags);
                                return;
                        }
                        local_irq_restore(flags);
                }
            } else {
                if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
                                   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
                        unsigned long flags;

                        *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
                                      _SUN4C_PAGE_VALID);

                        local_irq_save(flags);
                        if (sun4c_get_segmap(address) != invalid_segment) {
                                sun4c_put_pte(address, pte_val(*ptep));
                                local_irq_restore(flags);
                                return;
                        }
                        local_irq_restore(flags);
                }
            }
        }

        /* This conditional is 'interesting'. */
        if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
            && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
                /* Note: It is safe to not grab the MMAP semaphore here because
                 *       we know that update_mmu_cache() will not sleep for
                 *       any reason (at least not in the current implementation)
                 *       and therefore there is no danger of another thread getting
                 *       on the CPU and doing a shrink_mmap() on this vma.
                 */
                sun4c_update_mmu_cache(find_vma(current->mm, address), address,
                                       ptep);
        else
                do_sparc_fault(regs, text_fault, write, address);
}
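
/* Fault in a user address by hand; used by the register window fault
 * handlers at the bottom of this file.
 */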
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int code;

        code = SEGV_MAPERR;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if(!vma)
                goto bad_area;
        if(vma->vm_start <= address)
                goto good_area;
        if(!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if(expand_stack(vma, address))
                goto bad_area;
good_area:
        code = SEGV_ACCERR;
        if(write) {
                if(!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        up_read(&mm->mmap_sem);
        return;
bad_area:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
static void check_stack_aligned(unsigned long sp)
{
        if (sp & 0x7UL)
                force_sig(SIGILL, current);
}
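
/* The handlers below run when spilling or filling a register window to or
 * from the user stack faults.  A window save area is 16 words (0x40 bytes);
 * if it straddles a page boundary we also touch the last doubleword at
 * sp + 0x38 so that both pages get faulted in.
 */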
void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);

        check_stack_aligned(sp);
}
void window_underflow_fault(unsigned long sp)
{
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}
void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);

        check_stack_aligned(sp);
}