/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

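/*
 * Machine-dependent constants: __FAIL_ADDR_MASK extracts the failing
 * address from the translation exception identification (TEID) in
 * regs->int_parm_long, __SUBCODE_MASK is the external interrupt
 * subcode used by the pfault interface, and __PF_RES_FIELD fills the
 * reserved field of the pfault parameter block.
 */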
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */

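/*
 * Architecture-private fault codes, returned by do_exception() in
 * addition to the generic VM_FAULT_* flags and decoded in
 * do_fault_error().
 */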
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

static unsigned long store_indication __read_mostly;

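/*
 * With the fetch/store-indication facility (facility bit 75)
 * installed, the TEID bits selected by store_indication tell
 * whether the failing access was a store (0x400) or a fetch.
 */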
#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
#endif

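/*
 * Give registered kprobes a chance to handle a kernel-mode fault,
 * e.g. one raised while single-stepping a probed instruction.
 */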
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

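/*
 * The low two bits of the TEID encode the address space the failing
 * access went through: 0 primary, 1 access register, 2 secondary,
 * 3 home.
 */
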
/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (trans_exc_code == 0) /* primary space -> user */
		return 1;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

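/*
 * Walk the page tables for the given address by hand and print one
 * entry per level; the region cases deliberately fall through to the
 * next lower table level.
 */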
#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#else /* CONFIG_64BIT */

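/* 31-bit mode knows only segment and page tables. */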
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%08lx ", asce);
	table = table + ((address >> 20) & 0x7ff);
	if (bad_address(table))
		goto bad;
	pr_cont("S:%08lx ", *table);
	if (*table & _SEGMENT_ENTRY_INVALID)
		goto out;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%08lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */

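/*
 * Print which address space the fault happened in and dump the page
 * table entries for the failing address under the ASCE that was in
 * use at the time of the fault.
 */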
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

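/*
 * Kernel-mode faults: try to resolve them via the exception tables
 * (e.g. uaccess fixups); if there is no fixup entry, oops.
 */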
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       address, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
		/* fallthrough */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

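	/*
	 * When running a KVM guest (PF_VCPU is set), the faulting guest
	 * address must first be translated through the guest mapping
	 * (gmap) before the host vma can be looked up.
	 */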
#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

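	/*
	 * On VM_FAULT_RETRY the mmap_sem has been released (unless
	 * FAULT_FLAG_RETRY_NOWAIT was set); the lookup is redone from
	 * here with the retry flags cleared.
	 */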
retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
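	/*
	 * The host mapping exists now; for a guest fault the page must
	 * also be linked into the guest's gmap so the guest can make
	 * progress.
	 */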
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

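/*
 * Translation (not-present) faults are not permission faults, so any
 * type of access to the vma - read, write or execute - is acceptable
 * here; write protection is handled by do_protection_exception().
 */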
void do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */

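/*
 * Under z/VM, DIAG 0x258 enables the pseudo-page-fault facility: when
 * the host has to resolve a page fault on the guest's behalf, the
 * guest receives an "initial" external interrupt and may schedule
 * another task; a "completion" interrupt arrives once the page is in.
 */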
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}
__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

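/*
 * Register (function code 0) the parameter block above with the host
 * via DIAG 0x258.  refgaddr points at the lowcore field holding the
 * current pid, which serves as the token identifying the faulting
 * task in each pseudo-page-fault interrupt.
 */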
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

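/*
 * pfault_lock protects pfault_list, the list of tasks that sleep
 * while waiting for a pseudo-page-fault completion interrupt.
 */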
static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = sizeof(void *) == 4 ? param32 : param64;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

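/*
 * If a CPU is taken offline, wake up every task still waiting on
 * pfault_list for a completion interrupt; the interrupt may never
 * arrive.
 */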
static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
			     void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */