#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	BUG();
	return __pmd(0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

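/*
 * Illustrative sketch only, not part of this interface: a teardown loop
 * in the style of mm/memory.c can use the "full" variant for entries
 * that are not present, letting the architecture skip the usual TLB
 * synchronization.  The helper name and arguments are hypothetical.
 */
#if 0
static void example_zap_not_present(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, int fullmm)
{
	pte_t ptent = *ptep;

	if (!pte_present(ptent))
		/* no hardware walker can reach this entry; cheap clear */
		pte_clear_not_present_full(mm, addr, ptep, fullmm);
}
#endif
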
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)			(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)			pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)			(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page)		(0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)		pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

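/*
 * Illustrative sketch only, not part of this interface: a range walk
 * clamps each step with p?d_addr_end() so it advances one table entry
 * at a time but never past the caller's end address.  The helper name
 * below is hypothetical.
 */
#if 0
static void example_walk_range(struct mm_struct *mm,
			       unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		/* next = min(next PGDIR_SIZE boundary, end) */
		next = pgd_addr_end(addr, end);
		/* ... operate on the pgd entry covering [addr, next) ... */
	} while (pgd++, addr = next, addr != end);
}
#endif
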
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

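/*
 * Illustrative sketch only, not part of this interface: walkers combine
 * the boundary clamping above with these helpers so empty entries are
 * skipped and corrupt ones are reported once and reset to none.  The
 * helper name below is hypothetical.
 */
#if 0
static void example_walk_puds(struct mm_struct *mm, pgd_t *pgd,
			      unsigned long addr, unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;	/* nothing mapped, or entry was bad */
		/* ... descend to the pmd level for [addr, next) ... */
	} while (pud++, addr = next, addr != end);
}
#endif
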
static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
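
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): the start/commit pair brackets a read-modify-write of one
 * pte, in the style of mm/mprotect.c.  The pte lock is assumed to be
 * held by the caller.
 */
#if 0
static void example_change_protection_one(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep,
					   pgprot_t newprot)
{
	pte_t ptent;

	/* Snapshot the entry and make it non-present for the duration. */
	ptent = ptep_modify_prot_start(mm, addr, ptep);
	ptent = pte_modify(ptent, newprot);
	/* Install the new protections; the arch may batch this write. */
	ptep_modify_prot_commit(mm, addr, ptep, ptent);
}
#endif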
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

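/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * header): a batch of pte updates bracketed by the lazy MMU hooks, so a
 * paravirtualized architecture may coalesce them into a single flush or
 * hypercall.  The appropriate page table lock is assumed to be held.
 */
#if 0
static void example_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(mm, addr, ptep);	/* may be queued */
	arch_leave_lazy_mmu_mode();			/* flush the batch */
}
#endif
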
/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

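/*
 * Illustrative sketch only (hypothetical function, not part of this
 * header): the scheduler's context-switch path calls the hook before
 * switching address space and CPU state, so a paravirt implementation
 * can enter lazy CPU mode and batch the reloads; the matching exit is
 * handled in architecture-specific code.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	arch_start_context_switch(prev);
	/* ... switch_mm() / switch_to() follow in the real code ... */
}
#endif
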
#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
					unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
					unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
				unsigned long size);
#endif

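/*
 * Illustrative sketch only (hypothetical driver helper, not part of this
 * header): when a driver maps device memory with remap_pfn_range(), the
 * core mm calls track_pfn_vma_new() so an architecture (e.g. x86 PAT)
 * can record the memory type of that physical range; the matching
 * untrack_pfn_vma() runs when the mapping is torn down.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x12345;	/* hypothetical device pfn */

	/* remap_pfn_range() invokes the track_pfn_vma_new() hook itself */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif
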
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */