/*
 * Macros and functions to manipulate Meta page tables.
 */

#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define CONSISTENT_START	0xF7000000
#define CONSISTENT_END		0xF73FFFFF
#define VMALLOC_START		0xF8000000
#define VMALLOC_END		0xFFFEFFFF
#else
#define CONSISTENT_START	0x77000000
#define CONSISTENT_END		0x773FFFFF
#define VMALLOC_START		0x78000000
#define VMALLOC_END		0x7FFFFFFF
#endif
/*
 * Definitions for MMU descriptors
 *
 * These are the hardware bits in the MMCU pte entries.
 * Derived from the Meta toolkit headers.
 */
#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
/* Write combine bit - this can cause writes to occur out of order */
#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
/* Sys coherent bit - this bit is never used by Linux */
#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
#define _PAGE_ALWAYS_ZERO_1	0x020
#define _PAGE_CACHE_CTRL0	0x040
#define _PAGE_CACHE_CTRL1	0x080
#define _PAGE_ALWAYS_ZERO_2	0x100
#define _PAGE_ALWAYS_ZERO_3	0x200
#define _PAGE_ALWAYS_ZERO_4	0x400
#define _PAGE_ALWAYS_ZERO_5	0x800
/* These are software bits that we stuff into the gaps in the hardware
 * pte entries that are not used. Note, these DO get stored in the actual
 * hardware, but the hardware just does not use them.
 */
#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
#define _PAGE_FILE		_PAGE_ALWAYS_ZERO_3
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL		_PAGE_PRIV
/* No caching of this page */
#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
/* Burst caching - good for data streaming */
#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
/* One cache way per thread */
#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
/* Fully cached */
#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)

#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
/* Which bits are used for cache control ... */
#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
				 _PAGE_WR_COMBINE)

/* This is a mask of the bits preserved by pte_modify(); everything
 * outside it is taken from the new protection.
 */
#define _PAGE_CHG_MASK		(PAGE_MASK)
#define _PAGE_SZ_SHIFT		1
#define _PAGE_SZ_4K		(0x0)
#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
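
/*
 * For illustration: a 16K page has size code 0x2, so the pte size field
 * reads 0x2 << _PAGE_SZ_SHIFT == 0x4, and _PAGE_SZ_MASK == 0x1e.
 */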
#if defined(CONFIG_PAGE_SIZE_4K)
#define _PAGE_SZ		(_PAGE_SZ_4K)
#elif defined(CONFIG_PAGE_SIZE_8K)
#define _PAGE_SZ		(_PAGE_SZ_8K)
#elif defined(CONFIG_PAGE_SIZE_16K)
#define _PAGE_SZ		(_PAGE_SZ_16K)
#endif
#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
#endif
/*
 * The Linux memory management assumes a three-level page table setup. On
 * Meta, we use that, but "fold" the mid level into the top-level page
 * table.
 */
/* PGDIR_SHIFT determines the size of the area a second-level page table can
 * map. This is always 4MB.
 */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
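
/*
 * For illustration: PGDIR_SIZE == 1UL << 22 == 4MB and PGDIR_MASK ==
 * 0xFFC00000, so bits [31:22] of an address locate its 4MB region and
 * bits [21:0] are the offset within it.
 */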
/*
 * Entries per page directory level: we use a two-level setup, so
 * we don't really have any PMD directory physically. First level tables
 * always map 2Gb (local or global) at a granularity of 4MB, second-level
 * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
 * 1024 entries).
 */
#define PTRS_PER_PTE	(PGDIR_SIZE/PAGE_SIZE)
#define HPTRS_PER_PTE	(PGDIR_SIZE/HPAGE_SIZE)
#define PTRS_PER_PGD	512
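
/*
 * For illustration, with 4K pages: PTRS_PER_PTE == 4MB/4KB == 1024,
 * and the 512 pgd entries of 4MB each cover the full 2Gb space.
 */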
#define USER_PTRS_PER_PGD	256
#define FIRST_USER_ADDRESS	META_MEMORY_BASE
#define FIRST_USER_PGD_NR	pgd_index(FIRST_USER_ADDRESS)
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
#define PAGE_SHARED_C	PAGE_SHARED
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_COPY_C	PAGE_COPY

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_WRITE | \
				 _PAGE_CACHEABLE | _PAGE_KERNEL)
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
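
/*
 * __Pxxx covers private and __Sxxx shared mappings, indexed by the
 * read/write/execute bits of the mapping; PAGE_COPY in the writable
 * private slots is what provides copy-on-write semantics.
 */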
#ifndef __ASSEMBLY__

#include <asm/page.h>

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
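
/*
 * A rough usage sketch (pfn, ptep and vma assumed to be in hand, e.g.
 * in a fault handler):
 *
 *	set_pte_at(vma->vm_mm, address, ptep,
 *		   pfn_pte(pfn, vma->vm_page_prot));
 */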
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
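
/*
 * These are inverses: pte_pfn(pfn_pte(pfn, prot)) == pfn, as all the
 * protection bits live below PAGE_SHIFT.
 */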
#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
					!= (_PAGE_TABLE & ~_PAGE_SZ_MASK))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x)		pfn_to_page(pte_pfn(x))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)   { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)    { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= (~_PAGE_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
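
/*
 * These accessors compose; e.g. after a write fault one might mark a
 * pte both young and dirty:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 */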
/*
 * Macros to mark a page protection as write-combined or uncacheable.
 */
#define pgprot_writecombine(prot)					\
	__pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))

#define pgprot_noncached(prot)						\
	__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)
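
/*
 * Note the asymmetry: pgprot_writecombine() clears only the cache
 * window bits and leaves _PAGE_WR_COMBINE set, while pgprot_noncached()
 * clears the write combine bit too, so writes cannot be merged or
 * reordered.
 */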
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
	if (!paddr)
		return 0;
	return (unsigned long)__va(paddr);
}
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_shift(pmd)	(12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
					>> _PAGE_SZ_SHIFT))
#define pmd_num_ptrs(pmd)	(PGDIR_SIZE >> pmd_page_shift(pmd))
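
/*
 * For illustration: a second-level table of 4K pages has size code 0,
 * so pmd_page_shift() == 12 and pmd_num_ptrs() == 4MB >> 12 == 1024.
 */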
/*
 * Each pgd is only 2k, mapping 2Gb (local or global). If we're in global
 * space drop the top bit before indexing the pgd.
 */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define pgd_index(address)	((((address) & ~0x80000000) >> PGDIR_SHIFT) \
							& (PTRS_PER_PGD-1))
#else
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#endif
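
/*
 * For illustration, in the local (non-global) case:
 * pgd_index(0x40400000) == (0x40400000 >> 22) & 511 == 257.
 */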
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* Find an entry in the second-level page table. */
#if !defined(CONFIG_HUGETLB_PAGE)
/* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
# define pte_index(pmd, address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#else
/* some pages are huge, so read 1st level pt to find out */
# define pte_index(pmd, address) \
	(((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
#endif
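
/*
 * For illustration, without huge pages and with 4K pages:
 * pte_index(pmd, 0x40401000) == (0x40401000 >> 12) & 1023 == 1.
 */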
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Meta doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
}
/*
 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)).
 * Since _PAGE_PRESENT is bit 1, we can use the bits above that.
 */
#define __swp_type(x)			(((x).val >> 1) & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
					 ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
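
/*
 * Resulting layout: the present bit stays clear, bits [8:1] hold the
 * swap type and bits [31:10] the offset; e.g. __swp_entry(3, 5) packs
 * to (3 << 1) | (5 << 10) == 0x1406.
 */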
#define PTE_FILE_MAX_BITS	22
#define pte_to_pgoff(x)		(pte_val(x) >> 10)
#define pgoff_to_pte(x)		__pte(((x) << 10) | _PAGE_FILE)

#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
void paging_init(unsigned long mem_end);
#ifdef CONFIG_METAG_META12
/* This is a workaround for an issue in Meta 1 cores. These cores cache
 * invalid entries in the TLB so we always need to flush whenever we add
 * a new pte. Unfortunately we can only flush the whole TLB, not shoot down
 * single entries, so this is sub-optimal. This implementation ensures that
 * we will get a flush at the second attempt, so we may still get repeated
 * faults; we just don't overflow the kernel stack handling them.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
#endif
#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _METAG_PGTABLE_H */