/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (weigand@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
pmd_t *vmem_pmd_alloc(void);
pte_t *vmem_pte_alloc(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}
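/*
 * Illustrative use (hypothetical helper and values, not taken from this
 * file): the code that manages the kernel direct mapping accounts its
 * mappings per size, e.g. when one 1 MB segment mapping replaces 256
 * 4 KB page mappings:
 */
static inline void __update_page_count_example(void)
{
	update_page_count(PG_DIRECT_MAP_4K, -256);
	update_page_count(PG_DIRECT_MAP_1M, 1);
}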
struct seq_file;
void arch_report_meminfo(struct seq_file *m);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0UL
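/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and the shifts
 * above, a virtual address decomposes as
 *	pgd index = (addr >> 42) & 2047		(bits 42-52)
 *	pud index = (addr >> 31) & 2047		(bits 31-41)
 *	pmd index = (addr >> 20) & 2047		(bits 20-30)
 *	pte index = (addr >> 12) & 255		(bits 12-19)
 * which matches the pgd_index()/pud_index()/pmd_index()/pte_index()
 * macros defined further down.
 */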
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	P-table origin	      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * C Common-Segment Bit:  Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	S-table origin	      |	 TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit: Segment is not available for address-translation
 * TT Table type
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	   region table origin		|	DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event
 * G Segment-Invalid Bit
 * P Private-Space Bit
 * S Storage-Alteration event
 * R Real space
 * TL Table-Length
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the
 * page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
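/*
 * Illustrative decoding of the table above (hypothetical helper, not
 * used by the kernel): the three pte type tests used by generic mm code
 * reduce to simple mask checks on the pte value.
 */
static inline int __pte_type_example(unsigned long pteval)
{
	if (pteval == _PAGE_INVALID)	/* .10.00000000 */
		return 0;		/* empty, see pte_none() */
	if ((pteval & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT)
		return 1;		/* swap, see pte_swap() */
	return 2;			/* present, see pte_present() */
}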
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_ORIGIN	~0x7ffUL /* region third table origin */

#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff227UL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0).
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
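/*
 * Illustrative sketch (hypothetical helper, modeled on what the arch
 * code does elsewhere, e.g. asm/mmu_context.h): a complete ASCE combines
 * the top-level table origin with the table type/length and, for user
 * space, the user bits defined above.
 */
static inline unsigned long __mk_user_asce_example(unsigned long pgd_origin)
{
	return pgd_origin | _ASCE_TABLE_LENGTH | _ASCE_TYPE_REGION3 |
	       _ASCE_USER_BITS;
}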
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
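/*
 * Example (generic mm behaviour, for illustration): mm/mmap.c builds
 * protection_map[] from the __Pxxx/__Sxxx entries above and indexes it
 * with the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits. A private
 * PROT_READ|PROT_WRITE mapping therefore starts out as __P011 == PAGE_RO
 * and is only made writable on the first write fault (copy-on-write),
 * while a shared one gets __S011 == PAGE_RW directly.
 */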
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION3_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return pmd_bad(__pmd(pud_val(pud)));
	if (pud_large(pud))
		return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;

	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;

	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true; behaviour is undefined otherwise.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true; behaviour is undefined otherwise.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}
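/*
 * Illustrative use (hypothetical helper): mprotect-style protection
 * changes go through pte_modify(), which keeps the bits in
 * _PAGE_CHG_MASK and re-derives the hardware invalid/protect bits:
 */
static inline pte_t __pte_make_readonly_example(pte_t pte)
{
	return pte_modify(pte, PAGE_RO);
}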
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + TLB flush for the pte */
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
		: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
		  [m4] "i" (local) : "memory");
}

static inline void __ptep_ipte_range(unsigned long address, int nr,
				     pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;

		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_NX)
		pte_val(entry) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
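/*
 * Illustrative sketch (hypothetical helper): build a pte for a page and
 * install it through set_pte_at(), so that the pgste bookkeeping done
 * for KVM guest mms is not bypassed:
 */
static inline void __install_pte_example(struct mm_struct *mm,
					 unsigned long addr, pte_t *ptep,
					 struct page *page)
{
	set_pte_at(mm, addr, ptep, mk_pte(page, PAGE_RW));
}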
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}
#define pfn_pte(pfn, pgprot)	mk_pte_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr)	((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
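/*
 * Illustrative sketch (hypothetical helper, not used by the kernel): a
 * software walk from the pgd down to the pte of a user address, stopping
 * at empty, bad or large entries, mirroring what generic mm code does
 * with the offset macros and query functions above.
 */
static inline pte_t *__walk_to_pte_example(struct mm_struct *mm,
					   unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_large(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}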
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		return pmd;
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
				_SEGMENT_ENTRY_SOFT_DIRTY;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	}
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
		return pud;
	pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
		pud_val(pud) |= _REGION_ENTRY_PROTECT;
	}
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	if (pud_large(pud)) {
		pud_val(pud) |= _REGION3_ENTRY_DIRTY |
				_REGION3_ENTRY_SOFT_DIRTY;
		if (pud_val(pud) & _REGION3_ENTRY_WRITE)
			pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	}
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}
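/*
 * E.g. massage_pgprot_pmd(PAGE_RW) yields SEGMENT_RW: the software
 * read/write bits of the pte-level pgprot map to their segment-level
 * counterparts, so large pmds honor the same protection rules as ptes.
 */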
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	if (pmd_large(pmd)) {
		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		return pmd;
	}
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pmdp)
		: [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
		  [m4] "i" (local)
		: "cc");
}

static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	asm volatile(
		"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
		: "+m" (*pudp)
		: [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
		  [m4] "i" (local)
		: "cc");
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;

		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}
#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp)
{
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200.
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
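/*
 * Worked example (hypothetical helper, not used by the kernel): a round
 * trip through the swap entry encoding above. The type lands in bits
 * 57-61 of the pte value (s390 bit numbering), the offset in bits 0-51,
 * and __swp_type()/__swp_offset() invert the transformation.
 */
static inline int __swp_roundtrip_example(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x1234;
}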
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */