/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
extern void die(const char *, struct pt_regs *, long);
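/*
 * PFLAG() expands to the flag's name when that bit is set in 'val' (and
 * to "" otherwise), so the debug helpers below can dump page protection
 * bits by name.
 */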
#define PFLAG(val, flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)		PFLAG(pgprot_val(prot), flag)
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%016llx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}
static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}
static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}
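/*
 * Walk the page tables and return a pointer to the PTE mapping
 * 'address' in 'mm', or NULL if any level of the walk is empty or the
 * PTE itself is not present.
 */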
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;
	/*
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */
	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	tsk = current;
	mm = tsk->mm;
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (vma->vm_start <= address) {
		goto good_area;
	}
	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
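	/*
	 * Account the fault: a major fault had to block for I/O to bring
	 * the page in, a minor fault was satisfied without blocking.
	 */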
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			      regs, address);
	}
	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:
	up_read(&mm->mmap_sem);
	return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;

		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, task_pid_nr(current), current->comm,
			       (unsigned long) regs->pc);
		}
		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;
do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
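/*
 * Flush any TLB entry matching virtual page 'page' under 'asid'.  The
 * PTEH word of each ITLB/DTLB slot is read back with getcfg and
 * compared against the expected EPN | ASID | valid pattern; a matching
 * slot is invalidated.
 */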
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = neff_sign_extend(page);
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}
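/*
 * Flush the mapping for a single page of the VMA's address space,
 * using the current ASID.
 */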
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}
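/*
 * Flush all entries in [start, end] belonging to the VMA's mm.  Here
 * the ASID and EPN halves of each slot's PTEH word are compared
 * separately, since the pages in the range share an ASID but differ
 * in EPN.
 */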
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb));

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}
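/*
 * Flush an entire address space by retiring its MMU context.  No TLB
 * slots are touched; if the mm is currently live, activate_context()
 * hands it a fresh ASID so that stale entries can no longer match.
 */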
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}
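/*
 * No selective flush is implemented for kernel ranges yet; fall back
 * to a full flush.
 */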
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}
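/* Full flush; simply defers to flush_tlb_all(). */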
void __flush_tlb_global(void)
{
	flush_tlb_all();
}
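/*
 * PTE update hook.  This is a no-op here: do_page_fault() pre-loads
 * the TLB via __do_tlb_refill() as soon as the PTE is set up, so there
 * is nothing left to do by the time this is called.
 */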
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
}