/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <linux/quicklist.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
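
/*
 * The architecture is expected to supply the low-level hooks used below
 * (tlb_flush(), __tlb_remove_tlb_entry(), __pte_free_tlb(), __pmd_free_tlb()
 * and, unless __ARCH_HAS_4LEVEL_HACK is defined, __pud_free_tlb()), normally
 * from its own <asm/tlb.h> before this generic part is pulled in.
 */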

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #ifdef ARCH_FREE_PTR_NR
    #define FREE_PTE_NR		ARCH_FREE_PTR_NR
  #else
    #define FREE_PTE_NR		506
  #endif
  #define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR		1
  #define tlb_fast_mode(tlb)	1
#endif
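
/*
 * In fast mode (tlb->nr == ~0U, i.e. UP or only one CPU online) no other CPU
 * can hold a stale TLB entry, so tlb_remove_page() below frees pages
 * immediately instead of batching them in ->pages[].
 */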

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
#ifdef CONFIG_QUICKLIST
	tlb->need_flush += &__get_cpu_var(quicklist)[0].nr_pages != 0;
#endif
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
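
/*
 * Typical use by the core mm, as an illustrative sketch only ("start", "end"
 * and the page-table walk itself belong to the caller, not to this header):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *
 *	for each pte unmapped in [start, end):
 *		tlb_remove_tlb_entry(tlb, ptep, addr);	(see below)
 *		tlb_remove_page(tlb, page);		(see below)
 *
 *	tlb_finish_mmu(tlb, start, end);
 */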

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
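
/*
 * __tlb_remove_tlb_entry() is the architecture's hook.  On architectures
 * whose tlb_flush() invalidates the whole mm anyway (i386, for example) it
 * is commonly a no-op, e.g.:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */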

#define pte_free_tlb(tlb, ptep)					\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep);			\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)				\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp);			\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)				\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp);			\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */