#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)

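/*
 * Illustrative sketch (editorial, not part of the original header) of why
 * the signed _AC(1, L) matters on 32-bit PAE, where ptes are 64-bit:
 *
 *	u64 val = pte_val(pte);
 *	val &= ~_PAGE_PRESENT;	// ~(1L << 0) is -2: it sign-extends to
 *				// 0xfffffffffffffffe, so bit 63 (NX) and
 *				// the rest of the high pte word survive.
 *
 * With _AC(1, UL) the mask would zero-extend to 0x00000000fffffffe and
 * silently clear the upper pte word.
 */
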
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */
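/*
 * Illustrative sketch (editorial): with the present bit clear the CPU
 * faults on any access and ignores the remaining bits, so the kernel is
 * free to reuse them. A PROT_NONE page, for instance, is encoded as:
 *
 *	pte_t none = __pte(_PAGE_PROTNONE | _PAGE_ACCESSED);
 *
 * which is exactly the PAGE_NONE protection defined below.
 */
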
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL	(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)		__pgprot((x))
#else
# define MAKE_GLOBAL(x)		__pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

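/*
 * Editorial note: the three digits are the mmap "xwr" bits, indexed by
 * vm_flags in generic code, e.g. (illustrative):
 *
 *	pgprot_t prot = protection_map[VM_READ | VM_WRITE];	// __P011
 *
 * picks PAGE_COPY for a private read/write mapping (copy-on-write and,
 * where NX is supported, non-executable).
 */
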
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

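/*
 * Editorial note: on x86 the vaddr argument is unused; every caller gets
 * the same global page of zeroes, e.g. (illustrative):
 *
 *	struct page *zp = ZERO_PAGE(0);
 */
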
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
static inline int pte_global(pte_t pte)		{ return pte_val(pte) & _PAGE_GLOBAL; }
static inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_NX); }

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }
static inline pte_t pte_mkglobal(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_GLOBAL); }
static inline pte_t pte_clrglobal(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_GLOBAL); }

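/*
 * Illustrative usage (editorial): the pte_mk* helpers are pure functions
 * on the pte value; they compose freely and the result must be written
 * back explicitly, e.g.:
 *
 *	pte_t pte = *ptep;
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */
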
extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

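/*
 * Illustrative sketch (editorial; "pfn" is a hypothetical page frame
 * number): masking with __supported_pte_mask strips flags this CPU cannot
 * honour, e.g. _PAGE_NX on hardware without NX support:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 */
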
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}

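/*
 * Illustrative usage (editorial): pte_modify() keeps the pfn and the
 * accessed/dirty state (everything in _PAGE_CHG_MASK) and swaps in the
 * new protection bits, e.g. when downgrading a mapping:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// same page, new rights
 */
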
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

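/*
 * Editorial note: the atomic native_ptep_get_and_clear() must use xchg so
 * a hardware dirty-bit update racing on another CPU cannot be lost; the
 * local variant above may skip that only because its callers guarantee no
 * other CPU can touch this pte (e.g. full mm teardown).
 */
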
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

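/*
 * Illustrative pattern (editorial): ptep_set_access_flags() below follows
 * these rules - it writes the pte by hand, notifies the hypervisor via
 * pte_update_defer(), then performs the required flush_tlb_page() while
 * the caller still holds the page table lock.
 */
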
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*(ptep) = entry;					\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   (unsigned long *)&(ptep)->pte); \
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

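/*
 * Editorial note: generic code passes full != 0 when an entire address
 * space is being torn down (tlb->fullmm in the mmu_gather path), which is
 * what licenses the cheaper non-atomic clear, e.g. (illustrative):
 *
 *	pte = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
 */
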
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */