/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
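/*
 * Editorial example (values hypothetical, not from the original source):
 * zero_page_mask selects among several colored zero pages so that
 * ZERO_PAGE(vaddr) returns the zero page whose cache colour matches vaddr.
 * With four colour pages, i.e. zero_page_mask == 0x3000:
 *
 *	ZERO_PAGE(0x12345000) -> virt_to_page(empty_zero_page + 0x1000)
 *
 * With zero_page_mask == 0 every vaddr maps to the single base zero page.
 */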
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 PTEs per PGD.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
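/*
 * Illustrative note (editorial, not in the original source): with
 * PAGE_SHIFT == 12 from asm/page.h, the shifts and table sizes above
 * split a 64 bit virtual address as follows:
 *
 *	pte_index = (addr >> 12) & (PTRS_PER_PTE - 1)	bits 12..19
 *	pmd_index = (addr >> 20) & (PTRS_PER_PMD - 1)	bits 20..30
 *	pud_index = (addr >> 31) & (PTRS_PER_PUD - 1)	bits 31..41
 *	pgd_index = (addr >> 42) & (PTRS_PER_PGD - 1)	bits 42..52
 *
 * On 31 bit all three upper shifts are 20, so pud and pmd fold into the
 * pgd and only the segment index (bits 20..30) and the page index
 * (bits 12..19, 256 entries x 4KB = one 1MB segment) remain.
 */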
#define FIRST_USER_ADDRESS	0
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif
/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                     |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01, 10, 11
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all
 * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
 * afterwards. This change is done while holding the lock, but the
 * intermediate step of a previously valid pte with the hw invalid bit set
 * can be observed by handle_pte_fault. That makes it necessary that all
 * valid pte types with the hw invalid bit set must be distinguishable from
 * the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
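/*
 * Worked example (editorial, not part of the original comment): take a
 * read-only pte, type _PAGE_TYPE_RO, i.e. i/r/x/t = 0100. If ipte hits it
 * concurrently, a lockless reader may observe the intermediate value
 * _PAGE_TYPE_RO | _PAGE_INVALID = 0x600, i.e. 1100:
 *
 *	pte_val == 0x600:  pte_none    -> 1  (invalid set, swt clear)
 *	                   pte_present -> 0
 *	                   pte_file    -> 0
 *
 * so the intermediate state decodes as "none" and can never be mistaken
 * for a swap or file pte.
 */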
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100		/* private space control */
#define _ASCE_ALT_EVENT		0x80		/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f		/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_RO	0x200		/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20		/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10		/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f		/* page table length */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL
#define RCP_IN_BIT	0x00002000UL	/* IPTE notify bit */

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL
#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100		/* private space control */
#define _ASCE_ALT_EVENT		0x80		/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40		/* space switch event */
#define _ASCE_REAL_SPACE	0x20		/* real space control */
#define _ASCE_TYPE_MASK		0x0c		/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c		/* region first table type */
#define _ASCE_TYPE_REGION2	0x08		/* region second table type */
#define _ASCE_TYPE_REGION3	0x04		/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00		/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03		/* region table length */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL	/* region/segment table origin */
#define _REGION_ENTRY_RO	0x200		/* region protection bit */
#define _REGION_ENTRY_INV	0x20		/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c		/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c		/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08		/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04		/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03		/* region table length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400		/* RTTE-format control, large page */
#define _REGION3_ENTRY_RO	0x200		/* page protection bit */
#define _REGION3_ENTRY_CO	0x100		/* change-recording override */
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL	/* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* segment table origin */
#define _SEGMENT_ENTRY_RO	0x200		/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20		/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL
#define RCP_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
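/*
 * Example (editorial, not in the original source): a MAP_PRIVATE mapping
 * with PROT_READ|PROT_WRITE selects __P110 == PAGE_RO, so the first store
 * faults and the fault handler performs copy-on-write. The shared
 * counterpart __S110 == PAGE_RW is _PAGE_TYPE_RO | _PAGE_SWW: still
 * hardware read-only until the first write fault sets the software dirty
 * bit and clears _PAGE_RO (software dirty tracking, see pte_mkdirty below).
 */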
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }
#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}
static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	page_set_storage_key(address, nkey, 0);
#endif
}
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}
/**
 * struct gmap_struct - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the gmap_struct
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long vmaddr;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};
struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
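/*
 * Minimal usage sketch for the gmap interface above (editorial; a
 * hypothetical caller with error handling mostly elided - "from", "to",
 * "len" and "guest_addr" are made-up variables). It mirrors how a
 * KVM-style user would create a guest address space, map a segment-aligned
 * range of the process address space and resolve one guest address:
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	unsigned long host;
 *
 *	if (!gmap)
 *		return -ENOMEM;
 *	if (!gmap_map_segment(gmap, from, to, len)) {
 *		gmap_enable(gmap);
 *		host = gmap_fault(guest_addr, gmap);
 *		gmap_disable(gmap);
 *	}
 *	gmap_free(gmap);
 */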
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & RCP_IN_BIT) {
		pgste_val(pgste) &= ~RCP_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 the reference bits are in the storage key and never in
	 * the TLB. With virtualization we handle the reference bit,
	 * without it we can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set(ptep, pgste);
	}
	return pte;
}
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
		pgste_set_key(ptep, pgste, pte);
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		if (!full)
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
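/*
 * Lookup sketch using the helpers above (editorial example; assumes the
 * caller holds the necessary locks). "walk_to_pte" is a made-up name;
 * it returns the pte pointer for addr, or NULL if a level is empty or a
 * large page is mapped at the pmd level:
 *
 *	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, addr);
 *		if (pud_none(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd) || pmd_large(*pmd))
 *			return NULL;
 *		return pte_offset_map(pmd, addr);
 *	}
 */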
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"		/* referenced */
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}
#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
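/*
 * Worked example (editorial): type 5, offset 0x123 gives
 *
 *	pte_val = _PAGE_TYPE_SWAP | (5 << 2)
 *		| ((0x123 & 1UL) << 7) | ((0x123 & ~1UL) << 11)
 *		= 0x403 | 0x14 | 0x80 | 0x91000 = 0x91497
 *
 * and the accessors invert it: __swp_type() -> (0x91497 >> 2) & 0x1f = 5,
 * __swp_offset() -> (0x91497 >> 11) | ((0x91497 >> 7) & 1) = 0x123.
 */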
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */