/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

#if defined(CONFIG_KGDB)
int debugger_kernel_faults = 1;
#endif /* CONFIG_KGDB */
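
/*
 * Fault statistics: pte_misses counts faults that were satisfied by
 * handle_mm_fault(), pte_errors counts faults that ended in a signal
 * or an exception-table fixup.
 */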
static unsigned long pte_misses;	/* updated by do_page_fault() */
static unsigned long pte_errors;	/* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int *)regs->pc))
		return 0;
	/* check for 1 in the rD field */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* check for store opcodes */
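	/*
	 * (The 0xd0000000 test below is a coarse opcode check: the MicroBlaze
	 * store major opcodes, sb/sh/sw and the sbi/shi/swi immediate forms,
	 * all match it, while the load opcodes do not.)
	 */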
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;

	return 0;
}

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
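
	/*
	 * The fixup entries searched above are the __ex_table records
	 * emitted by the user access helpers (get_user(), copy_from_user()
	 * and friends); each names an address at which to resume if the
	 * access faults.
	 */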

	/* kernel has accessed a bad area */
#if defined(CONFIG_KGDB)
	if (debugger_kernel_faults)
		debugger(regs);
#endif
	die("kernel access of bad area", regs, sig);
}

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	siginfo_t info;
	int code = SEGV_MAPERR;
	int is_write = error_code & ESR_S;
	int fault;
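
	/*
	 * ESR_S is the "store" bit of the MicroBlaze exception status
	 * register: it is set when the faulting data access was a write.
	 * It is undefined for instruction-side faults, which is why
	 * is_write is cleared again for those cases below.
	 */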

	regs->ear = address;
	regs->esr = error_code;

	/* On a kernel SLB miss we can only check for a valid exception entry */
	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
		printk(KERN_WARNING "kernel task_size exceeded\n");
		_exception(SIGSEGV, regs, code, address);
	}

	/* for instr TLB miss and instr storage exception ESR_S is undefined */
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;

#if defined(CONFIG_KGDB)
	if (debugger_fault_handler && regs->trap == 0x300) {
		debugger_fault_handler(regs);
		return;
	}
#endif /* CONFIG_KGDB */

	if (in_atomic() || !mm) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

		/* in_atomic() in user mode is really bad,
		   as is current->mm == NULL. */
		printk(KERN_EMERG "Page fault in user mode with "
		       "in_atomic(), mm = %p\n", mm);
		printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
		       regs->r15, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exception table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
			goto bad_area_nosemaphore;

		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	/*
	 * N.B. The ABI allows programs to access up to
	 * a few hundred bytes below the stack pointer (TBD).
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {

		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed, i.e. an
		 * update-form store that moves r1 down to the accessed
		 * address (stwu rs,n(r1) / stwux rs,r1,rb in the PowerPC
		 * code this check was derived from); see store_updates_sp()
		 * above.
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->r1
			&& (kernel_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* The vma is mapped, so any failure from here on is an access
	 * permission problem rather than a missing mapping. */
	code = SEGV_ACCERR;

	/* a write */
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	/* a read */
	} else {
		/* protection fault */
		if (error_code & 0x08000000)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/* Account the fault against the task's fault counters. */
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 */
	pte_misses++;
	return;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	pte_errors++;

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
/*		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);*/
		return;
	}

	bad_page_fault(regs, address, SIGSEGV);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	up_read(&mm->mmap_sem);
	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	bad_page_fault(regs, address, SIGKILL);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}