#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */
#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

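/*
 * Illustrative sketch (not part of this header): a page table walker
 * typically clamps each step with p?d_addr_end, along the lines of
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		... operate on the range [addr, next) under one pmd ...
 *	} while (addr = next, addr != end);
 *
 * The "- 1" comparison in the macros keeps the clamp correct even when
 * the rounded-up boundary wraps to 0.
 */
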
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

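/*
 * Illustrative sketch (not part of this header): walkers in mm/ commonly
 * pair the boundary macros with these helpers, e.g.
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... process the page table mapped by this pmd ...
 *	} while (pmd++, addr = next, addr != end);
 */
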
static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
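/*
 * Illustrative sketch (not part of this header) of the transaction pattern,
 * roughly as a protection-changing path would use it under the pte lock:
 *
 *	ptent = ptep_modify_prot_start(mm, addr, ptep);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, ptep, ptent);
 */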
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

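/*
 * Illustrative sketch (not part of this header): a caller batches a run of
 * PTE updates inside the lazy window, under the page table lock, e.g.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * and must not read back through a raw PTE pointer inside that window.
 */
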
/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

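/*
 * Illustrative sketch (not part of this header): the scheduler's
 * context_switch() issues the start of the transaction, with the matching
 * end performed from architecture-specific switch code, roughly
 *
 *	arch_start_context_switch(prev);
 *	switch_mm(oldmm, mm, next);
 *	switch_to(prev, next, prev);
 */
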
#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
					unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
					unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
				unsigned long size);
#endif

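/*
 * Illustrative sketch (not part of this header): generic mm code invokes
 * these hooks around pfn mappings, roughly
 *
 *	track_pfn_vma_new(vma, &prot, pfn, size);	from remap_pfn_range()
 *	track_pfn_vma_copy(vma);			when a pfnmap vma is copied
 *	untrack_pfn_vma(vma, 0, 0);			when the whole vma is unmapped
 */
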
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */