/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
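
/*
 * Note: when an mm uses page status table entries (PGSTEs, i.e. KVM
 * guest mappings), each 2K page table is followed by a 2K array of
 * PGSTEs, addressed below as ptep[PTRS_PER_PTE]. A PGSTE holds the
 * guest view of the storage key, software bits used by KVM and a lock
 * bit (PCL) that serializes updates of a pte/pgste pair.
 */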

static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte(addr, ptep, IPTE_LOCAL);
	else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pte_t *ptep)
{
	pte_t old;

	old = *ptep;
	if (unlikely(pte_val(old) & _PAGE_INVALID))
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}
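
/*
 * ptep_flush_direct() invalidates the pte and flushes the TLB right
 * away, using the CPU-local form of IPTE when the mm is attached to
 * this CPU only. ptep_flush_lazy() merely marks the pte invalid and
 * defers the TLB flush (mm->context.flush_mm) when no other CPU has
 * the mm attached; otherwise it too must flush globally.
 */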

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(pte) & _PAGE_INVALID)
		return pgste;
	address = pte_val(pte) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}
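
/*
 * A guest storage key lives in the real storage key of the backing
 * host page while the pte is valid, and in the PGSTE while it is not:
 * pgste_update_all() saves the real key and its R/C bits into the
 * PGSTE before a page is invalidated, pgste_set_key() below restores
 * it when a new valid pte is established.
 */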

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
#endif
	*ptep = entry;
	return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long bits;

	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
	if (bits) {
		pgste_val(pgste) ^= bits;
		ptep_notify(mm, addr, ptep, bits);
	}
#endif
	return pgste;
}

static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pgste_t pgste = __pgste(0);

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
	}
	return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep,
				     pgste_t pgste, pte_t old, pte_t new)
{
	if (mm_has_pgste(mm)) {
		if (pte_val(old) & _PAGE_INVALID)
			pgste_set_key(ptep, pgste, new, mm);
		if (pte_val(new) & _PAGE_INVALID) {
			pgste = pgste_update_all(old, pgste, mm);
			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
			    _PGSTE_GPS_USAGE_UNUSED)
				pte_val(old) |= _PAGE_UNUSED;
		}
		pgste = pgste_set_pte(ptep, pgste, new);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = new;
	}
	return old;
}
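
/*
 * Exchanging a pte is a three step sequence: ptep_xchg_start() takes
 * the pgste lock and fires pending notifier bits, one of the flush
 * helpers invalidates the old pte, and ptep_xchg_commit() transfers
 * the storage key between page and PGSTE as needed, installs the new
 * pte and drops the pgste lock.
 */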

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_direct(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t new)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pgste_t pgste;
	pte_t old;

	preempt_disable();
	pgste = ptep_xchg_start(mm, addr, ptep);
	old = ptep_flush_lazy(mm, addr, ptep);
	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(old, pgste, mm);
		pgste_set(ptep, pgste);
	}
	return old;
}
EXPORT_SYMBOL(ptep_modify_prot_start);

void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (!MACHINE_HAS_NX)
		pte_val(pte) &= ~_PAGE_NOEXEC;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte, mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = pte;
	}
	preempt_enable();
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
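
/*
 * Note that ptep_modify_prot_start() leaves the pgste locked
 * (pgste_set() instead of pgste_set_unlock()) and preemption disabled;
 * both are only released by ptep_modify_prot_commit().
 */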

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte(addr, pmdp, IDTE_LOCAL);
	else
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t old;

	old = *pmdp;
	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
		return old;
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(&mm->context.cpu_attach_mask,
			  cpumask_of(smp_processor_id()))) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(addr, pmdp, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	atomic_dec(&mm->context.flush_count);
	return old;
}
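
/*
 * Segment (pmd) and region (pud) entries are invalidated with IDTE
 * where the facility is available; machines without IDTE fall back to
 * CSP (compare-and-swap-and-purge), which replaces the entry and
 * purges the TLB.
 */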

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
		     pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	old = pmdp_flush_lazy(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
				      unsigned long addr, pud_t *pudp)
{
	pud_t old;

	old = *pudp;
	if (pud_val(old) & _REGION_ENTRY_INVALID)
		return old;
	if (!MACHINE_HAS_IDTE) {
		/*
		 * Invalid bit position is the same for pmd and pud, so we can
		 * re-use _pmd_csp() here
		 */
		__pmdp_csp((pmd_t *) pudp);
		return old;
	}
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pudp_idte(addr, pudp, IDTE_LOCAL);
	else
		__pudp_idte(addr, pudp, IDTE_GLOBAL);
	atomic_dec(&mm->context.flush_count);
	return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pud_t *pudp, pud_t new)
{
	pud_t old;

	preempt_disable();
	old = pudp_flush_direct(mm, addr, pudp);
	*pudp = new;
	preempt_enable();
	return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
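
/*
 * The deposited page tables are kept on a list threaded through the
 * page table pages themselves (reusing the first pte slots as a
 * list_head), which is why withdraw resets the first two slots to
 * _PAGE_INVALID before handing the page table back.
 */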

#ifdef CONFIG_PGSTE
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	/* the mm_has_pgste() check is done in set_pte_at() */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
	pgste_set_key(ptep, pgste, entry, mm);
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;

	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) |= PGSTE_IN_BIT;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, int prot, unsigned long bit)
{
	pte_t entry;
	pgste_t pgste;
	int pte_i, pte_p;

	pgste = pgste_get_lock(ptep);
	entry = *ptep;
	/* Check pte entry after all locks have been acquired */
	pte_i = pte_val(entry) & _PAGE_INVALID;
	pte_p = pte_val(entry) & _PAGE_PROTECT;
	if ((pte_i && (prot != PROT_NONE)) ||
	    (pte_p && (prot & PROT_WRITE))) {
		pgste_set_unlock(ptep, pgste);
		return -EAGAIN;
	}
	/* Change access rights and set pgste bit */
	if (prot == PROT_NONE && !pte_i) {
		ptep_flush_direct(mm, addr, ptep);
		pgste = pgste_update_all(entry, pgste, mm);
		pte_val(entry) |= _PAGE_INVALID;
	}
	if (prot == PROT_READ && !pte_p) {
		ptep_flush_direct(mm, addr, ptep);
		pte_val(entry) &= ~_PAGE_INVALID;
		pte_val(entry) |= _PAGE_PROTECT;
	}
	pgste_val(pgste) |= bit;
	pgste = pgste_set_pte(ptep, pgste, entry);
	pgste_set_unlock(ptep, pgste);
	return 0;
}

int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte)
{
	pgste_t spgste, tpgste;
	pte_t spte, tpte;
	int rc = -EAGAIN;

	if (!(pte_val(*tptep) & _PAGE_INVALID))
		return 0;	/* already shadowed */
	spgste = pgste_get_lock(sptep);
	spte = *sptep;
	if (!(pte_val(spte) & _PAGE_INVALID) &&
	    !((pte_val(spte) & _PAGE_PROTECT) &&
	      !(pte_val(pte) & _PAGE_PROTECT))) {
		pgste_val(spgste) |= PGSTE_VSIE_BIT;
		tpgste = pgste_get_lock(tptep);
		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
				(pte_val(pte) & _PAGE_PROTECT);
		/* don't touch the storage key - it belongs to parent pgste */
		tpgste = pgste_set_pte(tptep, tpgste, tpte);
		pgste_set_unlock(tptep, tpgste);
		rc = 1;
	}
	pgste_set_unlock(sptep, spgste);
	return rc;
}
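
/*
 * Shadow (VSIE) page tables: the target pte receives the page frame of
 * the source pte combined with the protection requested for the
 * shadow, and the source pgste is marked with PGSTE_VSIE_BIT so that
 * changes to the source mapping invalidate the shadow.
 */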

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
	pgste_t pgste;

	pgste = pgste_get_lock(ptep);
	/* notifier is called by the caller */
	ptep_flush_direct(mm, saddr, ptep);
	/* don't touch the storage key - it belongs to parent pgste */
	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
	pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		dec_mm_counter(mm, mm_counter(page));
	}
	free_swap_and_cache(entry);
}

void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset)
{
	unsigned long pgstev;
	pgste_t pgste;
	pte_t pte;

	/* Zap unused and logically-zero pages */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	pte = *ptep;
	if (!reset && pte_swap(pte) &&
	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
	     (pgstev & _PGSTE_GPS_ZERO))) {
		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
	if (reset)
		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}
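
/*
 * The _PGSTE_GPS_* usage states are set by the guest via the ESSA
 * instruction (collaborative memory management); swap entries for
 * pages the guest marked as unused or logically zero can be dropped
 * here without losing guest state.
 */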

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long ptev;
	pgste_t pgste;

	/* Clear storage key */
	preempt_disable();
	pgste = pgste_get_lock(ptep);
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*ptep);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(ptep, pgste);
	preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return false;

	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep, IPTE_GLOBAL);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
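
/*
 * PGSTE_UC_BIT acts as the software dirty bit for guest pages: it is
 * set in pgste_set_pte() whenever a writable mapping is established.
 * Clearing it above re-protects (or, without ESOP, invalidates) the
 * pte so that the next guest write sets the bit again.
 */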

int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq)
{
	unsigned long keyul;
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	keyul = (unsigned long) key;
	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc)
{
	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
	int rc;

	/* we can drop the pgste lock between getting and setting the key */
	if (mr | mc) {
		rc = get_guest_storage_key(current->mm, addr, &tmp);
		if (rc)
			return rc;
		if (oldkey)
			*oldkey = tmp;
		if (!mr)
			mask |= _PAGE_REFERENCED;
		if (!mc)
			mask |= _PAGE_CHANGED;
		if (!((tmp ^ key) & mask))
			return 0;
	}
	rc = set_guest_storage_key(current->mm, addr, key, nq);
	return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;
	int cc = 0;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	new = old = pgste_get_lock(ptep);
	/* Reset guest reference bit only */
	pgste_val(new) &= ~PGSTE_GR_BIT;

	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		cc = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
		/* Merge real referenced bit into host-set */
		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
	}
	/* Reflect guest's logical view, not physical */
	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
	/* Changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;

	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep))
		return -EFAULT;

	pgste = pgste_get_lock(ptep);
	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID))
		*key = page_get_storage_key(pte_val(*ptep) & PAGE_MASK);
	/* Reflect guest's logical view, not physical */
	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);
#endif /* CONFIG_PGSTE */