/*
 *  linux/arch/frv/mm/fault.c
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * - Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68knommu/mm/fault.c
 *   - Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
 *   - Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
 *
 *  linux/arch/m68k/mm/fault.c
 *
 *  Copyright (C) 1995 Hamish Macdonald
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/gdb-stub.h>

/*****************************************************************************/
/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long _pme, lrai, lrad, fixup;
	siginfo_t info;
	pgd_t *pge;
	pud_t *pue;
	pte_t *pte;
	int write;

#if 0
	const char *atxc[16] = {
		[0x0] = "mmu-miss", [0x8] = "multi-dat", [0x9] = "multi-sat",
		[0xa] = "tlb-miss", [0xc] = "privilege", [0xd] = "write-prot",
	};

	printk("do_page_fault(%d,%lx [%s],%lx)\n",
	       datammu, esr0, atxc[esr0 >> 20 & 0xf], ear0);
#endif

	mm = current->mm;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was a page not present (invalid) error
	 */
	if (!user_mode(__frame) && (esr0 & ESR0_ATXC) == ESR0_ATXC_AMRTLB_MISS) {
		if (ear0 >= VMALLOC_START && ear0 < VMALLOC_END)
			goto kernel_pte_fault;
		if (ear0 >= PKMAP_BASE && ear0 < PKMAP_END)
			goto kernel_pte_fault;
	}

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
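
	/*
	 * Look up the VMA covering the faulting address; if there is none,
	 * or the address falls in a gap that is not a growable stack VMA,
	 * the access is treated as a bad area.
	 */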
	vma = find_vma(mm, ear0);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ear0)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(__frame)) {
		/*
		 * accessing the stack below %esp is always a bug.
		 * The "+ 32" is there due to some instructions (like
		 * pusha) doing post-decrement on the stack and that
		 * doesn't show up until later..
		 */
		if ((ear0 & PAGE_MASK) + 2 * PAGE_SIZE < __frame->sp) {
103 printk("[%d] ### Access below stack @%lx (sp=%lx)\n",
104 current->pid, ear0, __frame->sp);
105 show_registers(__frame);
106 printk("[%d] ### Code: [%08lx] %02x %02x %02x %02x %02x %02x %02x %02x\n",
109 ((u8*)__frame->pc)[0],
110 ((u8*)__frame->pc)[1],
111 ((u8*)__frame->pc)[2],
112 ((u8*)__frame->pc)[3],
113 ((u8*)__frame->pc)[4],
114 ((u8*)__frame->pc)[5],
115 ((u8*)__frame->pc)[6],
116 ((u8*)__frame->pc)[7]
123 if (expand_stack(vma, ear0))

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
 good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
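
	/*
	 * Check the access implied by the exception type in ESR0 against the
	 * permissions recorded in the VMA before asking the core MM code to
	 * resolve the fault.
	 */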
	switch (esr0 & ESR0_ATXC) {
	default:
		/* handle write to write protected page */
	case ESR0_ATXC_WP_EXCEP:
#ifdef TEST_VERIFY_AREA
		if (!(user_mode(__frame)))
			printk("WP fault at %08lx\n", __frame->pc);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write = 1;
		break;

		/* handle read from protected page */
	case ESR0_ATXC_PRIV_EXCEP:
		goto bad_area;

		/* handle read, write or exec on absent page
		 * - can't support write without permitting read
		 * - don't support execute without permitting read and vice-versa
		 */
	case ESR0_ATXC_AMRTLB_MISS:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, ear0, write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
 bad_area:
	up_read(&mm->mmap_sem);

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(__frame)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) ear0;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}

 no_context:
	/* are we prepared to handle this kernel fault? */
	if ((fixup = search_exception_table(__frame->pc)) != 0) {
		__frame->pc = fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (ear0 < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual addr %08lx\n", ear0);
	printk("  PC  : %08lx\n", __frame->pc);
	printk("  EXC : esr0=%08lx ear0=%08lx\n", esr0, ear0);
220 asm("lrai %1,%0,#1,#0,#0" : "=&r"(lrai) : "r"(ear0));
221 asm("lrad %1,%0,#1,#0,#0" : "=&r"(lrad) : "r"(ear0));
223 printk(KERN_ALERT " LRAI: %08lx\n", lrai);
224 printk(KERN_ALERT " LRAD: %08lx\n", lrad);
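
	/* hand the fault to the break/debug event handler (the kernel
	 * debugger hook) before we dig through the page tables
	 */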
	__break_hijack_kernel_event();

	pge = pgd_offset(current->mm, ear0);
	pue = pud_offset(pge, ear0);
	_pme = pue->pue[0].ste[0];

	printk(KERN_ALERT "  PGE : %8p { PME %08lx }\n", pge, _pme);
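
	/*
	 * If the mid-level entry is valid, temporarily point the DAMPR2
	 * protection register at the page holding the PTEs, use the
	 * corresponding DAMLR2 virtual window to read out the faulting
	 * entry, and then restore the previous DAMPR2 mapping.
	 */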
	if (_pme & xAMPRx_V) {
		unsigned long dampr, damlr, val;

		asm volatile("movsg dampr2,%0 ! movgs %2,dampr2 ! movsg damlr2,%1"
			     : "=&r"(dampr), "=r"(damlr)
			     : "r" (_pme | xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V)
			     );

		pte = (pte_t *) damlr + __pte_index(ear0);
		val = pte_val(*pte);

		asm volatile("movgs %0,dampr2" :: "r" (dampr));

		printk(KERN_ALERT "  PTE : %8p { %08lx }\n", pte, val);
	}

	die_if_kernel("Oops\n");
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
 out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(__frame))
		do_exit(SIGKILL);
	goto no_context;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) ear0;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(__frame))
		goto no_context;
	return;

/*
 * The fault was caused by a kernel PTE (such as installed by vmalloc or kmap)
 */
 kernel_pte_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(ear0);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) __get_TTBR();
		pgd = (pgd_t *)__va(pgd) + index;
		pgd_k = ((pgd_t *)(init_mm.pgd)) + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		//set_pgd(pgd, *pgd_k); /////// gcc ICE's on this line
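
		/*
		 * Walk the reference (init_mm) tables; if an entry is absent
		 * at any level there is nothing to copy and the fault goes
		 * down the normal no_context path.  Only the pmd level is
		 * actually copied into this task's tables.
		 */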
		pud_k = pud_offset(pgd_k, ear0);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd_k = pmd_offset(pud_k, ear0);
		if (!pmd_present(*pmd_k))
			goto no_context;

		pud = pud_offset(pgd, ear0);
		pmd = pmd_offset(pud, ear0);
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, ear0);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
} /* end do_page_fault() */