/*
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/segment.h>
/*
 * Page fault error code bits
 * bit 0 == 0 means no page found, 1 means protection fault
 * bit 1 == 0 means read, 1 means write
 * bit 2 == 0 means kernel, 1 means user-mode
 * bit 3 == 1 means use of reserved bit detected
 * bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
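
/*
 * Illustrative sketch only (not used by the fault path below): how the
 * bits combine. A user-mode write to a not-present page arrives as
 * error_code == (PF_USER|PF_WRITE) == 0x6; if the page was present but
 * read-only, PF_PROT is added, giving 0x7.
 */
static inline const char *pf_access_kind(unsigned long error_code)
{
	if (error_code & PF_INSTR)
		return "instruction fetch";
	return (error_code & PF_WRITE) ? "write" : "read";
}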
static inline int notify_page_fault(struct pt_regs *regs)
{
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			return 1;
	}
	return 0;
}
/*
 * X86_32:
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64:
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & PF_INSTR))
			return 0;
	}

	/* If it was an exec fault, ignore it. */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;
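	/* x86 instructions are at most 15 bytes long, so never scan further. */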
	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;
		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway.
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
			 * We need to figure out under what instruction mode the
			 * instruction was issued. We could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well-known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
		case 0x60:
			/* 0x64 through 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
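
/*
 * Worked example for the scan above: "prefetchnta (%eax)" assembles to
 * 0F 18 00. The first byte gives instr_hi == 0x00 and instr_lo == 0x0F,
 * so the 0x00 case reads the next opcode byte (0x18) and sets prefetch.
 */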
static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}
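
/* Returns nonzero if the given kernel address cannot be read safely. */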
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
		page &= ~_PAGE_NX;
	}
#else
	printk("*pde = %08lx ", page);
#endif
	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud)) goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}
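
/*
 * dump_pagetable() produces a single line like "PGD <val> PUD <val>
 * PMD <val> PTE <val>", stopping at the first level that is missing
 * ("ret") or unreadable ("BAD").
 */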
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
/*
 * Work around K8 erratum #93 and buggy BIOSes.
 *
 * BIOS SMM functions are required to use a specific workaround to avoid
 * corruption of the 64bit RIP register on C stepping K8. Many BIOSes that
 * were not tested properly miss this. The OS sees this as a page fault with
 * the upper 32 bits of RIP cleared. Try to work around it here.
 *
 * Note we only handle faults in the kernel here. Does nothing on X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
	if (address != regs->ip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
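	/*
	 * Kernel text and module addresses have all upper 32 bits set, so
	 * OR-ing them back in reconstructs the RIP the erratum truncated.
	 */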
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk(errata93_warning);
		regs->ip = address;
		return 1;
	}
	return 0;
}
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to
 * illegal addresses >4GB. We catch this in the page fault handler because
 * these addresses are not reachable. Just detect this case and return.
 * Any code segment in the LDT is compatibility mode.
 */
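
/*
 * Selector layout: bits 0-1 are the RPL, bit 2 is the TI bit (set for
 * LDT selectors), and the descriptor index starts at bit 3, hence the
 * (1<<2) test below.
 */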
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
	return 0;
}
void do_invalid_op(struct pt_regs *, unsigned long);

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
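
/*
 * Background (not in the original comment): "F0 0F C7 C8" is LOCK
 * CMPXCHG8B with a register operand, an invalid encoding that locks up
 * affected Pentiums while the CPU fetches the #UD vector from the IDT.
 * The workaround maps the IDT read-only, so that access page-faults
 * instead; if the faulting IDT slot is vector 6 (invalid opcode), the
 * exception is delivered by hand via do_invalid_op().
 */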
static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current->uid);
	}
#endif
	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
	printk(KERN_CONT " at %08lx\n", address);

	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
#else /* CONFIG_X86_64 */
	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
	printk(KERN_CONT " at %016lx\n", address);

	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
#endif
}
#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}
#endif
/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else /* CONFIG_X86_64 */
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush.
	 */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared.
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that.
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	return 0;
#endif
}
int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);
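	/* down_read() writes to the rwsem's count, so prefetch the cacheline for write. */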

	/* get the address */
	address = read_cr2();
	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection or reserved-bit error ((error_code & (PF_RSVD|PF_PROT)) == 0).
	 */
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}
	/*
	 * It's safe to allow IRQs after cr2 has been saved and the vmalloc
	 * fault has been handled.
	 */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();
	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:	/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:	/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
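
	/*
	 * Summary of the (error_code & (PF_PROT|PF_WRITE)) cases above:
	 *   3: write to a present page          -> needs VM_WRITE
	 *   2: write to a not-present page      -> needs VM_WRITE
	 *   1: read from a present page         -> always a bad area
	 *   0: read from a not-present page     -> needs VM_READ, VM_EXEC
	 *                                          or VM_WRITE
	 */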
survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
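		/* 0xA0000 is the base of the legacy VGA window that DOS programs draw into. */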
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;
		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}
	if (is_f00f_bug(regs, address))
		return;

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * of course).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;
	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;
			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
					(struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#else /* CONFIG_X86_64 */
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * of course).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;
	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;
			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock(&pgd_lock);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}
	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
		       (__START_KERNEL & PGDIR_MASK)));
#endif
}