#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif

/* An empty PTE can still have an R or C writeback */
#define RADIX_PTE_NONE_MASK	(_PAGE_DIRTY | _PAGE_ACCESSED)

/* Bits to set in a RPMD/RPUD/RPGD */
#define RADIX_PMD_VAL_BITS	(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS	(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS	(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)

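/*
 * In the VAL_BITS values above, bit 63 (0x8000000000000000UL) is the
 * valid bit of a radix page-directory entry, and the OR'd-in index
 * size encodes how big the next level of the tree is: a level with
 * index size n holds 2^n entries. This gloss is an interpretation of
 * the definitions, not text taken from the ISA.
 */
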
/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PGD_BAD_BITS	0x60000000000000e0UL

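/*
 * Decoding the mask (an arithmetic reading of the constant, not a
 * bit-level spec): 0x60000000000000e0UL covers bits 61-62, the
 * leaf/reserved bits at the top of the entry, and bits 5-7, the
 * reserved bits just above the next-level index size.
 */
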
/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE (RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
				  RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE (ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)

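/*
 * Worked example, assuming the index sizes from radix-64k.h (5 for the
 * PTE level, 9 for PMD, 9 for PUD, 13 for PGD) and a 64K PAGE_SHIFT of
 * 16: RADIX_PGTABLE_EADDR_SIZE = 5 + 9 + 9 + 13 + 16 = 52, so
 * RADIX_PGTABLE_RANGE is 2^52 bytes. The 4K geometry
 * (9 + 9 + 9 + 13 + 12) reaches the same 52-bit range.
 */
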
/*
 * We support a 52-bit address space. Use the top bit for the kernel
 * virtual mapping, and make sure the kernel fits in the top
 * quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |    2 quadrant    |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |    1 quadrant    |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |    0 quadrant    |
 *           |                  |
 * 0b00......+------------------+
 *
 * 3rd quadrant expanded:
 * +------------------------------+
 * |                              |
 * |                              |
 * +------------------------------+  Kernel IO map end (0xc010000000000000)
 * |                              |
 * |      1/2 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |      1/4 of virtual map      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc....)
 */
#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
#define RADIX_KERN_VIRT_SIZE	ASM_CONST(0x0008000000000000)

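/*
 * Arithmetic check: RADIX_KERN_VIRT_SIZE = 0x0008000000000000 = 2^51
 * bytes, half of the quadrant's 2^52-byte span. The kernel linear map
 * takes 0xc000000000000000 - 0xc008000000000000, and the kernel
 * virtual map takes the remaining half up to 0xc010000000000000, as in
 * the diagram above.
 */
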
/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on the radix config.
 * (we keep a quarter for the virtual memmap)
 */
#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	(RADIX_KERN_VIRT_SIZE >> 2)
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)

/*
 * Defines the address of the vmemmap area, in its own region after
 * the vmalloc space.
 */
#define RADIX_VMEMMAP_BASE	(RADIX_VMALLOC_END)

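/*
 * Resulting layout, derived from the constants above:
 *   RADIX_VMALLOC_START = 0xc008000000000000
 *   RADIX_VMALLOC_SIZE  = 2^51 / 4 = 0x0002000000000000
 *   RADIX_VMALLOC_END   = RADIX_VMEMMAP_BASE = 0xc00a000000000000
 */
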
#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)

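/*
 * Example, assuming 8-byte page-table entries as on ppc64: with the 4K
 * geometry, RADIX_PTE_TABLE_SIZE is 8 << 9 = 4K and
 * RADIX_PGD_TABLE_SIZE is 8 << 13 = 64K.
 */
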
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	pte_t pte;
	unsigned long old_pte, new_pte;

	do {
		pte = READ_ONCE(*ptep);
		old_pte = pte_val(pte);
		new_pte = (old_pte | set) & ~clr;

	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	return old_pte;
}

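/*
 * POWER9 DD1 cannot update a PTE in place: the sequence below first
 * invalidates the old entry, flushes it from the TLB, and only then
 * installs the new value. This summary describes the code as written
 * here, not the erratum text itself.
 */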
static inline unsigned long radix__pte_update(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, unsigned long clr,
					      unsigned long set,
					      int huge)
{
	unsigned long old_pte;

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long new_pte;

		old_pte = __radix_pte_update(ptep, ~0ul, 0);
		/*
		 * new value of pte
		 */
		new_pte = (old_pte | set) & ~clr;
		asm volatile("ptesync" : : : "memory");
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
		if (new_pte)
			__radix_pte_update(ptep, 0, new_pte);
	} else
		old_pte = __radix_pte_update(ptep, clr, set);
	asm volatile("ptesync" : : : "memory");
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

	if (full) {
		/*
		 * If we are trying to clear the pte, we can skip
		 * the DD1 pte update sequence and batch the tlb flush. The
		 * tlb flush batching is done by mmu gather code. We
		 * still keep the cmp_xchg update to make sure we get
		 * the correct R/C bit, which might be updated via the
		 * Nest MMU.
		 */
		old_pte = __radix_pte_update(ptep, ~0ul, 0);
	} else
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);

	return __pte(old_pte);
}

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to invalidate the TLB.
 */
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
						pte_t *ptep, pte_t entry,
						unsigned long address)
{
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {

		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, ~0, 0);
		asm volatile("ptesync" : : : "memory");
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
		__radix_pte_update(ptep, 0, new_pte);
	} else
		__radix_pte_update(ptep, 0, set);
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

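/*
 * As noted at the RADIX_PTE_NONE_MASK definition, hardware can write
 * the R or C bit back into an otherwise empty PTE, so those bits must
 * be ignored when testing for none.
 */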
static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

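/*
 * The ptesync in radix__set_pte_at below is taken to order the PTE
 * store ahead of any later dependence on the new mapping, so the
 * page-table walker sees the update; this is a reading of the
 * barrier's role here, not a restatement of the ISA's semantics.
 */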
static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;
	asm volatile("ptesync" : : : "memory");
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pgd_bad(pgd_t pgd)
{
	return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}

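/*
 * Assumed rationale for _PAGE_LARGE on DD1: the DD1 flush workaround
 * needs to identify huge mappings from the PTE value alone, so huge
 * PTEs are additionally tagged with _PAGE_LARGE. The code above only
 * shows the flag being OR'd in.
 */
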
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
						  unsigned long address, pmd_t *pmdp)
{
	/* Nothing to do for radix. */
	return;
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pmd_t *pmdp, unsigned long clr,
						unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					      pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp);
extern int radix__has_transparent_hugepage(void);
#endif

extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence:
	 *  DD1    52-28 = 24, 0b11000
	 *  Others 52-31 = 21, 0b10101
	 * RTS encoding details
	 * bits 0 - 2 of rts -> bits 5 - 7 of unsigned long
	 * bits 3 - 4 of rts -> bits 61 - 62 of unsigned long
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		rts_field = (0x3UL << 61);
	else {
		rts_field = (0x5UL << 5); /* 5 - 7 bits */
		rts_field |= (0x2UL << 61);
	}
	return rts_field;
}

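/*
 * Worked decode of the constants above: 21 = 0b10101, whose low three
 * bits (0b101) go to bits 5-7 via 0x5UL << 5 and whose high two bits
 * (0b10) go to bits 61-62 via 0x2UL << 61, encoding a 2^(21 + 31) =
 * 2^52 byte address space. DD1's 24 = 0b11000 has zero low bits, so
 * only the 0x3UL << 61 term is needed.
 */
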
#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_RADIX_H */