/*
 * Macros and functions to manipulate Meta page tables.
 */

#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H

#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define CONSISTENT_START	0xF7000000
#define CONSISTENT_END		0xF73FFFFF
#define VMALLOC_START		0xF8000000
#define VMALLOC_END		0xFFFEFFFF
#else
#define CONSISTENT_START	0x77000000
#define CONSISTENT_END		0x773FFFFF
#define VMALLOC_START		0x78000000
#define VMALLOC_END		0x7FFFFFFF
#endif
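/*
 * For orientation (derived from the constants above): in either layout the
 * CONSISTENT window spans 4MB (CONSISTENT_END - CONSISTENT_START + 1 ==
 * 0x400000) and VMALLOC_START begins at the next 16MB boundary above it;
 * which pair is used depends only on whether PAGE_OFFSET places the kernel
 * in local or global space.
 */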
/*
 * The Linux memory management assumes a three-level page table setup. On
 * Meta, we use that, but "fold" the mid level into the top-level page
 * table.
 */

/* PGDIR_SHIFT determines the size of the area a second-level page table can
 * map. This is always 4MB.
 */

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * Entries per page directory level: we use a two-level setup, so
 * we don't really have any PMD directory physically. First level tables
 * always map 2Gb (local or global) at a granularity of 4MB, second-level
 * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
 * 1024 entries).
 */
#define PTRS_PER_PTE	(PGDIR_SIZE/PAGE_SIZE)
#define HPTRS_PER_PTE	(PGDIR_SIZE/HPAGE_SIZE)
#define PTRS_PER_PGD	512
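/*
 * Worked example (illustration only, assuming 4kB base pages so that
 * PAGE_SHIFT == 12): PTRS_PER_PTE == PGDIR_SIZE / PAGE_SIZE ==
 * (1 << 22) / (1 << 12) == 1024 entries per second-level table, while the
 * 512 PTRS_PER_PGD first-level entries each cover PGDIR_SIZE == 4MB,
 * i.e. 512 * 4MB == 2Gb of local or global address space.
 */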
#define USER_PTRS_PER_PGD	256
#define FIRST_USER_ADDRESS	META_MEMORY_BASE
#define FIRST_USER_PGD_NR	pgd_index(FIRST_USER_ADDRESS)

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
#define PAGE_SHARED_C	PAGE_SHARED
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_COPY_C	PAGE_COPY
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_WRITE | \
				 _PAGE_CACHEABLE | _PAGE_KERNEL)
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
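/*
 * Note: the __Pxxx/__Sxxx index encodes a mapping's requested exec, write
 * and read bits (in that order); __P entries are used for private
 * (copy-on-write) mappings and __S entries for shared ones. For example a
 * private PROT_READ|PROT_WRITE mapping takes __P011 == PAGE_COPY, which
 * carries no _PAGE_WRITE bit, so the first write faults and triggers
 * copy-on-write.
 */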
#ifndef __ASSEMBLY__

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
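/*
 * Example (illustration): pfn_pte() and pte_pfn() are inverses over the pfn
 * field. With 4kB pages, pfn_pte(0x12345, PAGE_KERNEL) yields a pte whose
 * physical address is 0x12345000 and whose low bits carry the PAGE_KERNEL
 * protection flags; pte_pfn() shifts the flags away again and returns
 * 0x12345.
 */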
#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
					!= (_PAGE_TABLE & ~_PAGE_SZ_MASK))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x)		pfn_to_page(pte_pfn(x))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
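/*
 * Usage sketch (illustrative): the pte_mk...() and pte_wrprotect() helpers
 * operate on a pte value rather than on the page table itself, so callers
 * compose them and write the result back, e.g. when marking a page both
 * accessed and dirty:
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */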
/*
 * Macros to make a page protection write-combined or uncacheable.
 */
#define pgprot_writecombine(prot)					\
	__pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))

#define pgprot_noncached(prot)						\
	__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
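/*
 * For example, pgprot_noncached(PAGE_KERNEL) is simply PAGE_KERNEL with the
 * _PAGE_CACHEABLE bit cleared; drivers typically apply it to a vma's
 * vm_page_prot before mapping device memory so accesses bypass the cache.
 * pgprot_writecombine() instead clears the two cache-control bits,
 * _PAGE_CACHE_CTRL0 and _PAGE_CACHE_CTRL1.
 */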
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
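/*
 * Example (sketch of the pattern generic mm code uses when changing
 * protections, e.g. on an mprotect-style path):
 *
 *	pte_t pte = *ptep;
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, pte);
 *
 * _PAGE_CHG_MASK keeps the bits that must survive a protection change
 * (the pfn, and typically the accessed/dirty state), while the remaining
 * bits are taken from the new pgprot.
 */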
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
	if (!paddr)
		return 0;
	return (unsigned long)__va(paddr);
}
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_shift(pmd)	(12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
					>> _PAGE_SZ_SHIFT))
#define pmd_num_ptrs(pmd)	(PGDIR_SIZE >> pmd_page_shift(pmd))
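/*
 * Illustration: for a pmd describing ordinary 4kB second-level pages the
 * encoded size field is 0, so pmd_page_shift() evaluates to 12 and
 * pmd_num_ptrs() to PGDIR_SIZE >> 12 == 1024; larger encoded page sizes
 * raise the shift and proportionally shrink the number of ptes in the table.
 */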
/*
 * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
 * space drop the top bit before indexing the pgd.
 */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define pgd_index(address)	((((address) & ~0x80000000) >> PGDIR_SHIFT) \
						& (PTRS_PER_PGD-1))
#else
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#endif
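/*
 * Worked example (global-space case): for address 0xC0400000 the top bit is
 * masked off first, giving 0x40400000, and 0x40400000 >> 22 == 257, so
 * global addresses that differ only in bit 31 land in the same pgd slot.
 * A pgd of 512 entries * 4 bytes is the 2k mentioned above.
 */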
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the second-level page table. */
#if !defined(CONFIG_HUGETLB_PAGE)
  /* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
# define pte_index(pmd, address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#else
  /* some pages are huge, so read 1st level pt to find out */
# define pte_index(pmd, address) \
	(((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
#endif
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
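/*
 * End-to-end lookup sketch (illustrative only; on Meta the intermediate
 * levels are folded, so the pud/pmd offsets just pass the pgd entry
 * through):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pud_t *pud = pud_offset(pgd, address);
 *	pmd_t *pmd = pmd_offset(pud, address);
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	...
 *	pte_unmap(pte);
 */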
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Meta doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
}
/*
 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)).
 * Since PAGE_PRESENT is bit 1, we can use the bits above that.
 */
#define __swp_type(x)			(((x).val >> 1) & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
						 ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
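/*
 * Bit layout implied by the macros above: the low bit is left clear so
 * pte_present() can never be true for a swap entry, the swap type occupies
 * the 8 bits starting at bit 1, and the swap offset starts at bit 10.
 * For example __swp_entry(3, 0x20) has the value (3 << 1) | (0x20 << 10)
 * == 0x8006, from which __swp_type() and __swp_offset() recover 3 and 0x20.
 */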
#define kern_addr_valid(addr)	(1)
/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
void paging_init(unsigned long mem_end);
#ifdef CONFIG_METAG_META12
/* This is a workaround for an issue in Meta 1 cores. These cores cache
 * invalid entries in the TLB so we always need to flush whenever we add
 * a new pte. Unfortunately we can only flush the whole TLB, not shoot down
 * single entries, so this is sub-optimal. This implementation ensures that
 * we will get a flush at the second attempt, so we may still get repeated
 * faults, but we don't overflow the kernel stack handling them.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);  \
		flush_tlb_page(__vma, __address);			   \
	}								   \
	__changed;							   \
})
#endif
#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _METAG_PGTABLE_H */