3 * Copyright IBM Corp. 1999, 2000
4 * Author(s): Hartmut Penner (hp@de.ibm.com)
5 * Ulrich Weigand (weigand@de.ibm.com)
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * Derived from "include/asm-i386/pgtable.h"
11 #ifndef _ASM_S390_PGTABLE_H
12 #define _ASM_S390_PGTABLE_H
15 * The Linux memory management assumes a three-level page table setup.
16 * For s390 64 bit we use up to four of the five levels the hardware
17 * provides (region first tables are not used).
19 * The "pgd_xxx()" functions are trivial for a folded two-level
20 * setup: the pgd is never bad, and a pmd always exists (as it's folded into the pgd entry).
23 * This file contains the functions and defines necessary to modify and use
24 * the S390 page table tree.
27 #include <linux/sched.h>
28 #include <linux/mm_types.h>
29 #include <linux/page-flags.h>
30 #include <linux/radix-tree.h>
34 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
35 extern void paging_init(void);
36 extern void vmem_map_init(void);
39 * The S390 doesn't have any external MMU info: the kernel page
40 * tables contain all the necessary information.
42 #define update_mmu_cache(vma, address, ptep) do { } while (0)
43 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
46 * ZERO_PAGE is a global shared page that is always zero; used
47 * for zero-mapped memory areas etc..
50 extern unsigned long empty_zero_page;
51 extern unsigned long zero_page_mask;
53 #define ZERO_PAGE(vaddr) \
54 (virt_to_page((void *)(empty_zero_page + \
55 (((unsigned long)(vaddr)) & zero_page_mask))))
56 #define __HAVE_COLOR_ZERO_PAGE
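/*
 * Editor's note (sketch, not part of the original header): zero_page_mask
 * selects one of several adjacent copies of the zero page so that
 * differently cache-colored user addresses are backed by differently
 * colored zero pages. Assuming, purely for illustration, that
 * zero_page_mask were 0x3000, ZERO_PAGE(0x1000) and ZERO_PAGE(0x2000)
 * would return two distinct struct page pointers, at empty_zero_page +
 * 0x1000 and empty_zero_page + 0x2000, both reading as zeroes.
 */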
58 /* TODO: s390 cannot support io_remap_pfn_range... */
59 #endif /* !__ASSEMBLY__ */
62 * PMD_SHIFT determines the size of the area a second-level page
64 * PGDIR_SHIFT determines what a third-level page table entry can map
68 #define PGDIR_SHIFT 42
70 #define PMD_SIZE (1UL << PMD_SHIFT)
71 #define PMD_MASK (~(PMD_SIZE-1))
72 #define PUD_SIZE (1UL << PUD_SHIFT)
73 #define PUD_MASK (~(PUD_SIZE-1))
74 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
75 #define PGDIR_MASK (~(PGDIR_SIZE-1))
78 * Number of entries per page table level: on 64 bit s390 a page table
79 * holds 256 pte entries (2KB), while the segment, region-third,
80 * region-second and region-first tables hold 2048 entries (16KB) each,
81 * matching the PTRS_PER_* values below.
83 #define PTRS_PER_PTE 256
84 #define PTRS_PER_PMD 2048
85 #define PTRS_PER_PUD 2048
86 #define PTRS_PER_PGD 2048
88 #define FIRST_USER_ADDRESS 0UL
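/*
 * Editor's sketch: how a user address is split into table indices with the
 * values above. PMD_SHIFT and PUD_SHIFT are not visible in this excerpt;
 * 20 and 31 are assumed here for illustration only.
 *
 *	pgd_index(addr) = (addr >> 42) & (PTRS_PER_PGD - 1);	2048 entries
 *	pud_index(addr) = (addr >> 31) & (PTRS_PER_PUD - 1);	2048 entries
 *	pmd_index(addr) = (addr >> 20) & (PTRS_PER_PMD - 1);	2048 entries
 *	pte_index(addr) = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	256 entries
 *
 * The same arithmetic appears as the pgd_index/pud_index/pmd_index/pte_index
 * macros later in this file.
 */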
90 #define pte_ERROR(e) \
91 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
92 #define pmd_ERROR(e) \
93 printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
94 #define pud_ERROR(e) \
95 printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
96 #define pgd_ERROR(e) \
97 printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
101 * The vmalloc and module area will always be on the topmost area of the
102 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
103 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
104 * modules will reside. That makes sure that inter-module branches always
105 * happen without trampolines and, in addition, that the placement within a
106 * 2GB frame is branch prediction unit friendly.
108 extern unsigned long VMALLOC_START;
109 extern unsigned long VMALLOC_END;
110 extern struct page *vmemmap;
112 #define VMEM_MAX_PHYS ((unsigned long) vmemmap)
114 extern unsigned long MODULES_VADDR;
115 extern unsigned long MODULES_END;
116 #define MODULES_VADDR MODULES_VADDR
117 #define MODULES_END MODULES_END
118 #define MODULES_LEN (1UL << 31)
120 static inline int is_module_addr(void *addr)
122 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
123 if (addr < (void *)MODULES_VADDR)
125 if (addr > (void *)MODULES_END)
131 * A 64 bit page table entry of S390 has the following format:
133 * 0000000000111111111122222222223333333333444444444455555555556666
134 * 0123456789012345678901234567890123456789012345678901234567890123
136 * I Page-Invalid Bit: Page is not available for address-translation
137 * P Page-Protection Bit: Store access not possible for page
138 * C Change-bit override: HW is not required to set change bit
140 * A 64 bit segment table entry of S390 has the following format:
141 * | P-table origin | TT
142 * 0000000000111111111122222222223333333333444444444455555555556666
143 * 0123456789012345678901234567890123456789012345678901234567890123
145 * I Segment-Invalid Bit: Segment is not available for address-translation
146 * C Common-Segment Bit: Segment is not private (PoP 3-30)
147 * P Page-Protection Bit: Store access not possible for page
150 * A 64 bit region table entry of S390 has the following format:
151 * | S-table origin | TF TTTL
152 * 0000000000111111111122222222223333333333444444444455555555556666
153 * 0123456789012345678901234567890123456789012345678901234567890123
155 * I Segment-Invalid Bit: Segment is not available for address-translation
160 * The 64 bit region table origin of S390 has the following format:
161 * | region table origin | DTTL
162 * 0000000000111111111122222222223333333333444444444455555555556666
163 * 0123456789012345678901234567890123456789012345678901234567890123
165 * X Space-Switch event:
166 * G Segment-Invalid Bit:
167 * P Private-Space Bit:
168 * S Storage-Alteration:
172 * A storage key has the following format:
176 * F : fetch protection bit
181 /* Hardware bits in the page table entry */
182 #define _PAGE_PROTECT 0x200 /* HW read-only bit */
183 #define _PAGE_INVALID 0x400 /* HW invalid bit */
184 #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
186 /* Software bits in the page table entry */
187 #define _PAGE_PRESENT 0x001 /* SW pte present bit */
188 #define _PAGE_YOUNG 0x004 /* SW pte young bit */
189 #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
190 #define _PAGE_READ 0x010 /* SW pte read bit */
191 #define _PAGE_WRITE 0x020 /* SW pte write bit */
192 #define _PAGE_SPECIAL 0x040 /* SW associated with special page */
193 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
194 #define __HAVE_ARCH_PTE_SPECIAL
196 /* Set of bits not changed in pte_modify */
197 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
201 * handle_pte_fault uses pte_present and pte_none to find out the pte type
202 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
203 * distinguish present from not-present ptes. It is changed only with the page table lock held.
206 * The following table gives the different possible bit combinations for
207 * the pte hardware and software bits in the last 12 bits of a pte
208 * (. unassigned bit, x don't care, t swap type; column order: .IR.uswrdy.p):
216 * prot-none, clean, old .11.xx0000.1
217 * prot-none, clean, young .11.xx0001.1
218 * prot-none, dirty, old .10.xx0010.1
219 * prot-none, dirty, young .10.xx0011.1
220 * read-only, clean, old .11.xx0100.1
221 * read-only, clean, young .01.xx0101.1
222 * read-only, dirty, old .11.xx0110.1
223 * read-only, dirty, young .01.xx0111.1
224 * read-write, clean, old .11.xx1100.1
225 * read-write, clean, young .01.xx1101.1
226 * read-write, dirty, old .10.xx1110.1
227 * read-write, dirty, young .00.xx1111.1
228 * HW-bits: R read-only, I invalid
229 * SW-bits: p present, y young, d dirty, r read, w write, s special, u unused
232 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
233 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
234 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
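/*
 * Editor's worked example (sketch, derived from the _PAGE_* defines above):
 * a pte whose low 12 bits are 0x215 has _PAGE_PRESENT (0x001), _PAGE_YOUNG
 * (0x004), _PAGE_READ (0x010) and _PAGE_PROTECT (0x200) set, while
 * _PAGE_INVALID, _PAGE_WRITE and _PAGE_DIRTY are clear. That is the
 * "read-only, clean, young" row of the table: the hardware allows fetches
 * but no stores, and pte_present()/pte_young() are true while pte_dirty()
 * and pte_write() are false.
 */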
237 /* Bits in the segment/region table address-space-control-element */
238 #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
239 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
240 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
241 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
242 #define _ASCE_REAL_SPACE 0x20 /* real space control */
243 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
244 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */
245 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */
246 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */
247 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
248 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */
250 /* Bits in the region table entry */
251 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
252 #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
253 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
254 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
255 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
256 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
257 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
258 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
260 #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
261 #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
262 #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
263 #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
264 #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
265 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
267 #define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
268 #define _REGION3_ENTRY_RO 0x200 /* page protection bit */
270 /* Bits in the segment table entry */
271 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
272 #define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
273 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
274 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
275 #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
276 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
278 #define _SEGMENT_ENTRY (0)
279 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
281 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
282 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
283 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
284 #define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
285 #define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
288 * Segment table entry encoding (R = read-only, I = invalid, y = young bit; column order: dy..R...I...wr):
290 * prot-none, clean, old 00..1...1...00
291 * prot-none, clean, young 01..1...1...00
292 * prot-none, dirty, old 10..1...1...00
293 * prot-none, dirty, young 11..1...1...00
294 * read-only, clean, old 00..1...1...01
295 * read-only, clean, young 01..1...0...01
296 * read-only, dirty, old 10..1...1...01
297 * read-only, dirty, young 11..1...0...01
298 * read-write, clean, old 00..1...1...11
299 * read-write, clean, young 01..1...0...11
300 * read-write, dirty, old 10..0...1...11
301 * read-write, dirty, young 11..0...0...11
302 * The segment table origin is used to distinguish empty (origin==0) from
303 * read-write, old segment table entries (origin!=0)
304 * HW-bits: R read-only, I invalid
305 * SW-bits: y young, d dirty, r read, w write
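/*
 * Editor's worked example (sketch, derived from the _SEGMENT_ENTRY_* defines
 * below): a large (1MB) pmd whose low bits are 0x3403 has
 * _SEGMENT_ENTRY_DIRTY (0x2000), _SEGMENT_ENTRY_YOUNG (0x1000),
 * _SEGMENT_ENTRY_LARGE (0x0400), _SEGMENT_ENTRY_READ (0x0002) and
 * _SEGMENT_ENTRY_WRITE (0x0001) set, while _SEGMENT_ENTRY_PROTECT and
 * _SEGMENT_ENTRY_INVALID are clear. That matches the "read-write, dirty,
 * young" row: the mapping is valid and writable in hardware, and the
 * software dirty and young bits are both set.
 */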
308 /* Page status table bits for virtualization */
309 #define PGSTE_ACC_BITS 0xf000000000000000UL
310 #define PGSTE_FP_BIT 0x0800000000000000UL
311 #define PGSTE_PCL_BIT 0x0080000000000000UL
312 #define PGSTE_HR_BIT 0x0040000000000000UL
313 #define PGSTE_HC_BIT 0x0020000000000000UL
314 #define PGSTE_GR_BIT 0x0004000000000000UL
315 #define PGSTE_GC_BIT 0x0002000000000000UL
316 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
317 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
319 /* Guest Page State used for virtualization */
320 #define _PGSTE_GPS_ZERO 0x0000000080000000UL
321 #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
322 #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
323 #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
326 * A user page table pointer has the space-switch-event bit, the
327 * private-space-control bit and the storage-alteration-event-control
328 * bit set. A kernel page table pointer doesn't need them.
330 #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
334 * Page protection definitions.
336 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
337 #define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
338 _PAGE_INVALID | _PAGE_PROTECT)
339 #define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
340 _PAGE_INVALID | _PAGE_PROTECT)
342 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
343 _PAGE_YOUNG | _PAGE_DIRTY)
344 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
345 _PAGE_YOUNG | _PAGE_DIRTY)
346 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
350 * On s390 the page table entry has an invalid bit and a read-only bit.
351 * Read permission implies execute permission and write permission
352 * implies read permission.
355 #define __P000 PAGE_NONE
356 #define __P001 PAGE_READ
357 #define __P010 PAGE_READ
358 #define __P011 PAGE_READ
359 #define __P100 PAGE_READ
360 #define __P101 PAGE_READ
361 #define __P110 PAGE_READ
362 #define __P111 PAGE_READ
364 #define __S000 PAGE_NONE
365 #define __S001 PAGE_READ
366 #define __S010 PAGE_WRITE
367 #define __S011 PAGE_WRITE
368 #define __S100 PAGE_READ
369 #define __S101 PAGE_READ
370 #define __S110 PAGE_WRITE
371 #define __S111 PAGE_WRITE
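/*
 * Editor's note (sketch): these arrays implement copy-on-write. A private
 * PROT_READ|PROT_WRITE mapping starts out as __P011 == PAGE_READ, i.e.
 * without _PAGE_WRITE, so the first store faults and the page can be
 * copied before write access is granted. A shared mapping with the same
 * protection uses __S011 == PAGE_WRITE and is writable from the start.
 */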
374 * Segment entry (large page) protection definitions.
376 #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
377 _SEGMENT_ENTRY_PROTECT)
378 #define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
380 #define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
381 _SEGMENT_ENTRY_WRITE)
383 static inline int mm_has_pgste(struct mm_struct *mm)
386 if (unlikely(mm->context.has_pgste))
392 static inline int mm_alloc_pgste(struct mm_struct *mm)
395 if (unlikely(mm->context.alloc_pgste))
402 * If a guest uses storage keys, faults should no longer
403 * be backed by zero pages.
405 #define mm_forbids_zeropage mm_use_skey
406 static inline int mm_use_skey(struct mm_struct *mm)
409 if (mm->context.use_skey)
416 * pgd/pmd/pte query functions
418 static inline int pgd_present(pgd_t pgd)
420 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
422 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
425 static inline int pgd_none(pgd_t pgd)
427 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
429 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
432 static inline int pgd_bad(pgd_t pgd)
435 * With dynamic page table levels the pgd can be a region table
436 * entry or a segment table entry. Check for the bits that are
437 * invalid for either table entry.
440 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
441 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
442 return (pgd_val(pgd) & mask) != 0;
445 static inline int pud_present(pud_t pud)
447 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
449 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
452 static inline int pud_none(pud_t pud)
454 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
456 return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
459 static inline int pud_large(pud_t pud)
461 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
463 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
466 static inline int pud_bad(pud_t pud)
469 * With dynamic page table levels the pud can be a region table
470 * entry or a segment table entry. Check for the bits that are
471 * invalid for either table entry.
474 ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
475 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
476 return (pud_val(pud) & mask) != 0;
479 static inline int pmd_present(pmd_t pmd)
481 return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
484 static inline int pmd_none(pmd_t pmd)
486 return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
489 static inline int pmd_large(pmd_t pmd)
491 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
494 static inline unsigned long pmd_pfn(pmd_t pmd)
496 unsigned long origin_mask;
498 origin_mask = _SEGMENT_ENTRY_ORIGIN;
499 if (pmd_large(pmd))
500 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
501 return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
504 static inline int pmd_bad(pmd_t pmd)
506 if (pmd_large(pmd))
507 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
508 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
511 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
512 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
513 unsigned long address, pmd_t *pmdp,
514 pmd_t entry, int dirty);
516 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
517 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
518 unsigned long address, pmd_t *pmdp);
520 #define __HAVE_ARCH_PMD_WRITE
521 static inline int pmd_write(pmd_t pmd)
523 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
526 static inline int pmd_dirty(pmd_t pmd)
530 dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
534 static inline int pmd_young(pmd_t pmd)
538 young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
542 static inline int pte_present(pte_t pte)
544 /* Bit pattern: (pte & 0x001) == 0x001 */
545 return (pte_val(pte) & _PAGE_PRESENT) != 0;
548 static inline int pte_none(pte_t pte)
550 /* Bit pattern: pte == 0x400 */
551 return pte_val(pte) == _PAGE_INVALID;
554 static inline int pte_swap(pte_t pte)
556 /* Bit pattern: (pte & 0x201) == 0x200 */
557 return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) == _PAGE_PROTECT;
561 static inline int pte_special(pte_t pte)
563 return (pte_val(pte) & _PAGE_SPECIAL);
566 #define __HAVE_ARCH_PTE_SAME
567 static inline int pte_same(pte_t a, pte_t b)
569 return pte_val(a) == pte_val(b);
572 #ifdef CONFIG_NUMA_BALANCING
573 static inline int pte_protnone(pte_t pte)
575 return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
578 static inline int pmd_protnone(pmd_t pmd)
580 /* pmd_large(pmd) implies pmd_present(pmd) */
581 return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
585 static inline pgste_t pgste_get_lock(pte_t *ptep)
587 unsigned long new = 0;
595 " nihh %0,0xff7f\n" /* clear PCL bit in old */
596 " oihh %1,0x0080\n" /* set PCL bit in new */
599 : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
600 : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
605 static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
609 " nihh %1,0xff7f\n" /* clear PCL bit */
611 : "=Q" (ptep[PTRS_PER_PTE])
612 : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
618 static inline pgste_t pgste_get(pte_t *ptep)
620 unsigned long pgste = 0;
622 pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
624 return __pgste(pgste);
627 static inline void pgste_set(pte_t *ptep, pgste_t pgste)
630 *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
634 static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
635 struct mm_struct *mm)
638 unsigned long address, bits, skey;
640 if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
642 address = pte_val(*ptep) & PAGE_MASK;
643 skey = (unsigned long) page_get_storage_key(address);
644 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
645 /* Transfer page changed & referenced bit to guest bits in pgste */
646 pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
647 /* Copy page access key and fetch protection bit to pgste */
648 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
649 pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
655 static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
656 struct mm_struct *mm)
659 unsigned long address;
662 if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
664 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
665 address = pte_val(entry) & PAGE_MASK;
667 * Set page access key and fetch protection bit from pgste.
668 * The guest C/R information is still in the PGSTE, set real
671 nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
672 nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
673 page_set_storage_key(address, nkey, 0);
677 static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
679 if ((pte_val(entry) & _PAGE_PRESENT) &&
680 (pte_val(entry) & _PAGE_WRITE) &&
681 !(pte_val(entry) & _PAGE_INVALID)) {
682 if (!MACHINE_HAS_ESOP) {
684 * Without enhanced suppression-on-protection force
685 * the dirty bit on for all writable ptes.
687 pte_val(entry) |= _PAGE_DIRTY;
688 pte_val(entry) &= ~_PAGE_PROTECT;
690 if (!(pte_val(entry) & _PAGE_PROTECT))
691 /* This pte allows write access, set user-dirty */
692 pgste_val(pgste) |= PGSTE_UC_BIT;
699 * struct gmap_struct - guest address space
700 * @crst_list: list of all crst tables used in the guest address space
701 * @mm: pointer to the parent mm_struct
702 * @guest_to_host: radix tree with guest to host address translation
703 * @host_to_guest: radix tree with pointer to segment table entries
704 * @guest_table_lock: spinlock to protect all entries in the guest page table
705 * @table: pointer to the page directory
706 * @asce: address space control element for gmap page table
707 * @pfault_enabled: defines if pfaults are applicable for the guest
710 struct list_head list;
711 struct list_head crst_list;
712 struct mm_struct *mm;
713 struct radix_tree_root guest_to_host;
714 struct radix_tree_root host_to_guest;
715 spinlock_t guest_table_lock;
716 unsigned long *table;
718 unsigned long asce_end;
724 * struct gmap_notifier - notify function block for page invalidation
725 * @notifier_call: address of callback function
727 struct gmap_notifier {
728 struct list_head list;
729 void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
732 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
733 void gmap_free(struct gmap *gmap);
734 void gmap_enable(struct gmap *gmap);
735 void gmap_disable(struct gmap *gmap);
736 int gmap_map_segment(struct gmap *gmap, unsigned long from,
737 unsigned long to, unsigned long len);
738 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
739 unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
740 unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
741 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
742 int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
743 void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
744 void __gmap_zap(struct gmap *, unsigned long gaddr);
745 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
748 void gmap_register_ipte_notifier(struct gmap_notifier *);
749 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
750 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
751 void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
753 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
755 pte_t *ptep, pgste_t pgste)
758 if (pgste_val(pgste) & PGSTE_IN_BIT) {
759 pgste_val(pgste) &= ~PGSTE_IN_BIT;
760 gmap_do_ipte_notify(mm, addr, ptep);
767 * Certain architectures need to do special things when PTEs
768 * within a page table are directly modified. Thus, the following
769 * hook is made available.
771 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
772 pte_t *ptep, pte_t entry)
776 if (mm_has_pgste(mm)) {
777 pgste = pgste_get_lock(ptep);
778 pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
779 pgste_set_key(ptep, pgste, entry, mm);
780 pgste = pgste_set_pte(ptep, pgste, entry);
781 pgste_set_unlock(ptep, pgste);
788 * query functions pte_write/pte_dirty/pte_young only work if
789 * pte_present() is true. Undefined behaviour if not..
791 static inline int pte_write(pte_t pte)
793 return (pte_val(pte) & _PAGE_WRITE) != 0;
796 static inline int pte_dirty(pte_t pte)
798 return (pte_val(pte) & _PAGE_DIRTY) != 0;
801 static inline int pte_young(pte_t pte)
803 return (pte_val(pte) & _PAGE_YOUNG) != 0;
806 #define __HAVE_ARCH_PTE_UNUSED
807 static inline int pte_unused(pte_t pte)
809 return pte_val(pte) & _PAGE_UNUSED;
813 * pgd/pmd/pte modification functions
816 static inline void pgd_clear(pgd_t *pgd)
818 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
819 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
822 static inline void pud_clear(pud_t *pud)
824 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
825 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
828 static inline void pmd_clear(pmd_t *pmdp)
830 pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
833 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
835 pte_val(*ptep) = _PAGE_INVALID;
839 * The following pte modification functions only work if
840 * pte_present() is true. Undefined behaviour if not..
842 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
844 pte_val(pte) &= _PAGE_CHG_MASK;
845 pte_val(pte) |= pgprot_val(newprot);
847 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
848 * invalid bit set, clear it again for readable, young pages
850 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
851 pte_val(pte) &= ~_PAGE_INVALID;
853 * newprot for PAGE_READ and PAGE_WRITE has the page protection
854 * bit set, clear it again for writable, dirty pages
856 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
857 pte_val(pte) &= ~_PAGE_PROTECT;
861 static inline pte_t pte_wrprotect(pte_t pte)
863 pte_val(pte) &= ~_PAGE_WRITE;
864 pte_val(pte) |= _PAGE_PROTECT;
868 static inline pte_t pte_mkwrite(pte_t pte)
870 pte_val(pte) |= _PAGE_WRITE;
871 if (pte_val(pte) & _PAGE_DIRTY)
872 pte_val(pte) &= ~_PAGE_PROTECT;
876 static inline pte_t pte_mkclean(pte_t pte)
878 pte_val(pte) &= ~_PAGE_DIRTY;
879 pte_val(pte) |= _PAGE_PROTECT;
883 static inline pte_t pte_mkdirty(pte_t pte)
885 pte_val(pte) |= _PAGE_DIRTY;
886 if (pte_val(pte) & _PAGE_WRITE)
887 pte_val(pte) &= ~_PAGE_PROTECT;
891 static inline pte_t pte_mkold(pte_t pte)
893 pte_val(pte) &= ~_PAGE_YOUNG;
894 pte_val(pte) |= _PAGE_INVALID;
898 static inline pte_t pte_mkyoung(pte_t pte)
900 pte_val(pte) |= _PAGE_YOUNG;
901 if (pte_val(pte) & _PAGE_READ)
902 pte_val(pte) &= ~_PAGE_INVALID;
906 static inline pte_t pte_mkspecial(pte_t pte)
908 pte_val(pte) |= _PAGE_SPECIAL;
912 #ifdef CONFIG_HUGETLB_PAGE
913 static inline pte_t pte_mkhuge(pte_t pte)
915 pte_val(pte) |= _PAGE_LARGE;
920 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
922 unsigned long pto = (unsigned long) ptep;
924 /* Invalidation + global TLB flush for the pte */
927 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
930 static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
932 unsigned long pto = (unsigned long) ptep;
934 /* Invalidation + local TLB flush for the pte */
936 " .insn rrf,0xb2210000,%2,%3,0,1"
937 : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
940 static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
942 unsigned long pto = (unsigned long) ptep;
944 /* Invalidate a range of ptes + global TLB flush of the ptes */
947 " .insn rrf,0xb2210000,%2,%0,%1,0"
948 : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
952 static inline void ptep_flush_direct(struct mm_struct *mm,
953 unsigned long address, pte_t *ptep)
957 if (pte_val(*ptep) & _PAGE_INVALID)
959 active = (mm == current->active_mm) ? 1 : 0;
960 count = atomic_add_return(0x10000, &mm->context.attach_count);
961 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
962 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
963 __ptep_ipte_local(address, ptep);
965 __ptep_ipte(address, ptep);
966 atomic_sub(0x10000, &mm->context.attach_count);
969 static inline void ptep_flush_lazy(struct mm_struct *mm,
970 unsigned long address, pte_t *ptep)
974 if (pte_val(*ptep) & _PAGE_INVALID)
976 active = (mm == current->active_mm) ? 1 : 0;
977 count = atomic_add_return(0x10000, &mm->context.attach_count);
978 if ((count & 0xffff) <= active) {
979 pte_val(*ptep) |= _PAGE_INVALID;
980 mm->context.flush_mm = 1;
982 __ptep_ipte(address, ptep);
983 atomic_sub(0x10000, &mm->context.attach_count);
987 * Get (and clear) the user dirty bit for a pte.
989 static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
997 if (!mm_has_pgste(mm))
999 pgste = pgste_get_lock(ptep);
1000 dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
1001 pgste_val(pgste) &= ~PGSTE_UC_BIT;
1003 if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
1004 pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
1005 __ptep_ipte(addr, ptep);
1006 if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
1007 pte_val(pte) |= _PAGE_PROTECT;
1009 pte_val(pte) |= _PAGE_INVALID;
1012 pgste_set_unlock(ptep, pgste);
1016 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1017 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1018 unsigned long addr, pte_t *ptep)
1024 if (mm_has_pgste(vma->vm_mm)) {
1025 pgste = pgste_get_lock(ptep);
1026 pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
1029 oldpte = pte = *ptep;
1030 ptep_flush_direct(vma->vm_mm, addr, ptep);
1031 young = pte_young(pte);
1032 pte = pte_mkold(pte);
1034 if (mm_has_pgste(vma->vm_mm)) {
1035 pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
1036 pgste = pgste_set_pte(ptep, pgste, pte);
1037 pgste_set_unlock(ptep, pgste);
1044 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1045 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1046 unsigned long address, pte_t *ptep)
1048 return ptep_test_and_clear_young(vma, address, ptep);
1052 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1053 * both clear the TLB for the unmapped pte. The reason is that
1054 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1055 * to modify an active pte. The sequence is
1056 * 1) ptep_get_and_clear
1057 * 2) set_pte_at
1058 * 3) flush_tlb_range
1059 * On s390 the tlb needs to get flushed with the modification of the pte
1060 * if the pte is active. The only way this can be implemented is to
1061 * have ptep_get_and_clear do the tlb flush. In exchange, flush_tlb_range is a nop.
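/*
 * Editor's sketch of the common-code sequence the comment above refers to
 * (simplified; the comment cites change_pte_range as an example):
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);  // on s390 this also flushes the TLB
 *	pte = pte_modify(pte, newprot);
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_range(vma, start, end);          // can therefore be (almost) a nop here
 */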
1064 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1065 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1066 unsigned long address, pte_t *ptep)
1071 if (mm_has_pgste(mm)) {
1072 pgste = pgste_get_lock(ptep);
1073 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1077 ptep_flush_lazy(mm, address, ptep);
1078 pte_val(*ptep) = _PAGE_INVALID;
1080 if (mm_has_pgste(mm)) {
1081 pgste = pgste_update_all(&pte, pgste, mm);
1082 pgste_set_unlock(ptep, pgste);
1087 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1088 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1089 unsigned long address,
1095 if (mm_has_pgste(mm)) {
1096 pgste = pgste_get_lock(ptep);
1097 pgste_ipte_notify(mm, address, ptep, pgste);
1101 ptep_flush_lazy(mm, address, ptep);
1103 if (mm_has_pgste(mm)) {
1104 pgste = pgste_update_all(&pte, pgste, mm);
1105 pgste_set(ptep, pgste);
1110 static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1111 unsigned long address,
1112 pte_t *ptep, pte_t pte)
1116 if (mm_has_pgste(mm)) {
1117 pgste = pgste_get(ptep);
1118 pgste_set_key(ptep, pgste, pte, mm);
1119 pgste = pgste_set_pte(ptep, pgste, pte);
1120 pgste_set_unlock(ptep, pgste);
1125 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1126 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1127 unsigned long address, pte_t *ptep)
1132 if (mm_has_pgste(vma->vm_mm)) {
1133 pgste = pgste_get_lock(ptep);
1134 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1138 ptep_flush_direct(vma->vm_mm, address, ptep);
1139 pte_val(*ptep) = _PAGE_INVALID;
1141 if (mm_has_pgste(vma->vm_mm)) {
1142 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
1143 _PGSTE_GPS_USAGE_UNUSED)
1144 pte_val(pte) |= _PAGE_UNUSED;
1145 pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
1146 pgste_set_unlock(ptep, pgste);
1152 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1153 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1154 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1155 * cannot be accessed while the batched unmap is running. In this case
1156 * full==1 and a simple pte_clear is enough. See tlb.h.
1158 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1159 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1160 unsigned long address,
1161 pte_t *ptep, int full)
1166 if (!full && mm_has_pgste(mm)) {
1167 pgste = pgste_get_lock(ptep);
1168 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1173 ptep_flush_lazy(mm, address, ptep);
1174 pte_val(*ptep) = _PAGE_INVALID;
1176 if (!full && mm_has_pgste(mm)) {
1177 pgste = pgste_update_all(&pte, pgste, mm);
1178 pgste_set_unlock(ptep, pgste);
1183 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1184 static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1185 unsigned long address, pte_t *ptep)
1190 if (pte_write(pte)) {
1191 if (mm_has_pgste(mm)) {
1192 pgste = pgste_get_lock(ptep);
1193 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
1196 ptep_flush_lazy(mm, address, ptep);
1197 pte = pte_wrprotect(pte);
1199 if (mm_has_pgste(mm)) {
1200 pgste = pgste_set_pte(ptep, pgste, pte);
1201 pgste_set_unlock(ptep, pgste);
1208 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1209 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1210 unsigned long address, pte_t *ptep,
1211 pte_t entry, int dirty)
1215 if (pte_same(*ptep, entry))
1217 if (mm_has_pgste(vma->vm_mm)) {
1218 pgste = pgste_get_lock(ptep);
1219 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
1222 ptep_flush_direct(vma->vm_mm, address, ptep);
1224 if (mm_has_pgste(vma->vm_mm)) {
1225 pgste_set_key(ptep, pgste, entry, vma->vm_mm);
1226 pgste = pgste_set_pte(ptep, pgste, entry);
1227 pgste_set_unlock(ptep, pgste);
1234 * Conversion functions: convert a page and protection to a page entry,
1235 * and a page entry and page directory to the page they refer to.
1237 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1240 pte_val(__pte) = physpage + pgprot_val(pgprot);
1241 return pte_mkyoung(__pte);
1244 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1246 unsigned long physpage = page_to_phys(page);
1247 pte_t __pte = mk_pte_phys(physpage, pgprot);
1249 if (pte_write(__pte) && PageDirty(page))
1250 __pte = pte_mkdirty(__pte);
1254 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1255 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1256 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1257 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
1259 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
1260 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
1262 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1263 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
1264 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
1266 static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
1268 pud_t *pud = (pud_t *) pgd;
1269 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1270 pud = (pud_t *) pgd_deref(*pgd);
1271 return pud + pud_index(address);
1274 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1276 pmd_t *pmd = (pmd_t *) pud;
1277 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1278 pmd = (pmd_t *) pud_deref(*pud);
1279 return pmd + pmd_index(address);
1282 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1283 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1284 #define pte_page(x) pfn_to_page(pte_pfn(x))
1286 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1288 /* Find an entry in the lowest level page table.. */
1289 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1290 #define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
1291 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
1292 #define pte_unmap(pte) do { } while (0)
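/*
 * Editor's sketch (illustrative only, no locking or error handling): how the
 * helpers above combine to walk from an mm to the pte mapping a user address.
 *
 *	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);	// folds onto the pgd unless it is a region-2 entry
 *		pmd_t *pmd = pmd_offset(pud, addr);	// folds onto the pud unless it is a region-3 entry
 *
 *		if (pmd_none(*pmd) || pmd_large(*pmd))
 *			return NULL;			// no page table, or a large mapping
 *		return pte_offset_map(pmd, addr);
 *	}
 *
 * Real callers also need to check pgd_none/pud_none/pmd_bad at each level and
 * hold the appropriate page table lock.
 */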
1294 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1295 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1298 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
1299 * Convert to segment table entry format.
1301 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1302 return pgprot_val(SEGMENT_NONE);
1303 if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
1304 return pgprot_val(SEGMENT_READ);
1305 return pgprot_val(SEGMENT_WRITE);
1308 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1310 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1311 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1315 static inline pmd_t pmd_mkwrite(pmd_t pmd)
1317 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1318 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1320 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1324 static inline pmd_t pmd_mkclean(pmd_t pmd)
1326 if (pmd_large(pmd)) {
1327 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1328 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1333 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1335 if (pmd_large(pmd)) {
1336 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
1337 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1338 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1343 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1345 if (pmd_large(pmd)) {
1346 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1347 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1348 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
1353 static inline pmd_t pmd_mkold(pmd_t pmd)
1355 if (pmd_large(pmd)) {
1356 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1357 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1362 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1364 if (pmd_large(pmd)) {
1365 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1366 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1367 _SEGMENT_ENTRY_LARGE;
1368 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1369 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1370 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1371 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1372 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1375 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
1376 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1380 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1383 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1387 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1389 static inline void __pmdp_csp(pmd_t *pmdp)
1391 register unsigned long reg2 asm("2") = pmd_val(*pmdp);
1392 register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
1393 _SEGMENT_ENTRY_INVALID;
1394 register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
1399 : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
1402 static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
1406 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
1408 " .insn rrf,0xb98e0000,%2,%3,0,0"
1410 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1414 static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
1418 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
1420 " .insn rrf,0xb98e0000,%2,%3,0,1"
1422 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1426 static inline void pmdp_flush_direct(struct mm_struct *mm,
1427 unsigned long address, pmd_t *pmdp)
1431 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1433 if (!MACHINE_HAS_IDTE) {
1437 active = (mm == current->active_mm) ? 1 : 0;
1438 count = atomic_add_return(0x10000, &mm->context.attach_count);
1439 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
1440 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1441 __pmdp_idte_local(address, pmdp);
1443 __pmdp_idte(address, pmdp);
1444 atomic_sub(0x10000, &mm->context.attach_count);
1447 static inline void pmdp_flush_lazy(struct mm_struct *mm,
1448 unsigned long address, pmd_t *pmdp)
1452 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1454 active = (mm == current->active_mm) ? 1 : 0;
1455 count = atomic_add_return(0x10000, &mm->context.attach_count);
1456 if ((count & 0xffff) <= active) {
1457 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
1458 mm->context.flush_mm = 1;
1459 } else if (MACHINE_HAS_IDTE)
1460 __pmdp_idte(address, pmdp);
1463 atomic_sub(0x10000, &mm->context.attach_count);
1466 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1468 #define __HAVE_ARCH_PGTABLE_DEPOSIT
1469 extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1472 #define __HAVE_ARCH_PGTABLE_WITHDRAW
1473 extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1475 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1476 pmd_t *pmdp, pmd_t entry)
1481 static inline pmd_t pmd_mkhuge(pmd_t pmd)
1483 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
1484 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1485 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1489 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1490 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1491 unsigned long address, pmd_t *pmdp)
1496 pmdp_flush_direct(vma->vm_mm, address, pmdp);
1497 *pmdp = pmd_mkold(pmd);
1498 return pmd_young(pmd);
1501 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1502 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1503 unsigned long address, pmd_t *pmdp)
1507 pmdp_flush_direct(mm, address, pmdp);
1512 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
1513 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
1514 unsigned long address,
1515 pmd_t *pmdp, int full)
1520 pmdp_flush_lazy(mm, address, pmdp);
1525 #define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
1526 static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
1527 unsigned long address, pmd_t *pmdp)
1529 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1532 #define __HAVE_ARCH_PMDP_INVALIDATE
1533 static inline void pmdp_invalidate(struct vm_area_struct *vma,
1534 unsigned long address, pmd_t *pmdp)
1536 pmdp_flush_direct(vma->vm_mm, address, pmdp);
1539 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
1540 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1541 unsigned long address, pmd_t *pmdp)
1545 if (pmd_write(pmd)) {
1546 pmdp_flush_direct(mm, address, pmdp);
1547 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
1551 static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
1552 unsigned long address,
1555 return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
1557 #define pmdp_collapse_flush pmdp_collapse_flush
1559 #define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1560 #define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1562 static inline int pmd_trans_huge(pmd_t pmd)
1564 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1567 static inline int has_transparent_hugepage(void)
1569 return MACHINE_HAS_HPAGE ? 1 : 0;
1571 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1574 * 64 bit swap entry format:
1575 * A page-table entry has some bits we have to treat in a special way.
1576 * Bits 52 and 55 have to be zero, otherwise a specification
1577 * exception will occur instead of a page translation exception. The
1578 * specification exception has the bad habit of not storing the necessary
1579 * information in the lowcore.
1580 * Bits 54 and 63 are used to indicate the page type.
1581 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
1582 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
1583 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 for the offset.
1585 * | offset |01100|type |00|
1586 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1587 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
1590 #define __SWP_OFFSET_MASK ((1UL << 52) - 1)
1591 #define __SWP_OFFSET_SHIFT 12
1592 #define __SWP_TYPE_MASK ((1UL << 5) - 1)
1593 #define __SWP_TYPE_SHIFT 2
1595 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1599 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1600 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1601 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
1605 static inline unsigned long __swp_type(swp_entry_t entry)
1607 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1610 static inline unsigned long __swp_offset(swp_entry_t entry)
1612 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1615 static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1617 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1620 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1621 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
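/*
 * Editor's worked example (sketch, values chosen only for illustration):
 * with __SWP_TYPE_SHIFT == 2 and __SWP_OFFSET_SHIFT == 12,
 *
 *	__swp_entry(3, 0x1234).val
 *		== _PAGE_INVALID | _PAGE_PROTECT | (0x1234UL << 12) | (3 << 2)
 *		== 0x123460c
 *
 * Bits 52 and 55 remain zero as required, (pte & 0x201) == 0x200 marks the
 * entry as a swap pte, and __swp_type()/__swp_offset() recover 3 and 0x1234.
 */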
1623 #endif /* !__ASSEMBLY__ */
1625 #define kern_addr_valid(addr) (1)
1627 extern int vmem_add_mapping(unsigned long start, unsigned long size);
1628 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
1629 extern int s390_enable_sie(void);
1630 extern int s390_enable_skey(void);
1631 extern void s390_reset_cmma(struct mm_struct *mm);
1633 /* s390 has a private copy of get unmapped area to deal with cache synonyms */
1634 #define HAVE_ARCH_UNMAPPED_AREA
1635 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1638 * No page table caches to initialise
1640 static inline void pgtable_cache_init(void) { }
1641 static inline void check_pgt_cache(void) { }
1643 #include <asm-generic/pgtable.h>
1645 #endif /* _ASM_S390_PGTABLE_H */