2 * Copyright IBM Corp. 2007, 2011
3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 #include <linux/sched.h>
7 #include <linux/kernel.h>
8 #include <linux/errno.h>
11 #include <linux/swap.h>
12 #include <linux/smp.h>
13 #include <linux/highmem.h>
14 #include <linux/pagemap.h>
15 #include <linux/spinlock.h>
16 #include <linux/module.h>
17 #include <linux/quicklist.h>
18 #include <linux/rcupdate.h>
19 #include <linux/slab.h>
20 #include <linux/swapops.h>
21 #include <linux/ksm.h>
22 #include <linux/mman.h>
24 #include <asm/pgtable.h>
25 #include <asm/pgalloc.h>
27 #include <asm/tlbflush.h>
28 #include <asm/mmu_context.h>
32 #define FRAG_MASK 0x0f
35 #define FRAG_MASK 0x03
39 unsigned long *crst_table_alloc(struct mm_struct *mm)
41 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
45 return (unsigned long *) page_to_phys(page);
48 void crst_table_free(struct mm_struct *mm, unsigned long *table)
50 free_pages((unsigned long) table, ALLOC_ORDER);
54 static void __crst_table_upgrade(void *arg)
56 struct mm_struct *mm = arg;
58 if (current->active_mm == mm) {
65 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
67 unsigned long *table, *pgd;
71 BUG_ON(limit > (1UL << 53));
74 table = crst_table_alloc(mm);
77 spin_lock_bh(&mm->page_table_lock);
78 if (mm->context.asce_limit < limit) {
79 pgd = (unsigned long *) mm->pgd;
80 if (mm->context.asce_limit <= (1UL << 31)) {
81 entry = _REGION3_ENTRY_EMPTY;
82 mm->context.asce_limit = 1UL << 42;
83 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
87 entry = _REGION2_ENTRY_EMPTY;
88 mm->context.asce_limit = 1UL << 53;
89 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
93 crst_table_init(table, entry);
94 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
95 mm->pgd = (pgd_t *) table;
96 mm->task_size = mm->context.asce_limit;
100 spin_unlock_bh(&mm->page_table_lock);
102 crst_table_free(mm, table);
103 if (mm->context.asce_limit < limit)
106 on_each_cpu(__crst_table_upgrade, mm, 0);
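/*
 * For orientation: the possible address space sizes map to ASCE designation
 * types as follows (see crst_table_upgrade()/crst_table_downgrade() here and
 * gmap_alloc() below):
 *	2 GB  (1UL << 31)  ->  _ASCE_TYPE_SEGMENT, segment table only
 *	4 TB  (1UL << 42)  ->  _ASCE_TYPE_REGION3, region-third table on top
 *	8 PB  (1UL << 53)  ->  _ASCE_TYPE_REGION2, region-second table on top
 *	full  64 bit       ->  _ASCE_TYPE_REGION1, region-first table on top
 */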
110 void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
114 if (current->active_mm == mm) {
118 while (mm->context.asce_limit > limit) {
120 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
121 case _REGION_ENTRY_TYPE_R2:
122 mm->context.asce_limit = 1UL << 42;
123 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
127 case _REGION_ENTRY_TYPE_R3:
128 mm->context.asce_limit = 1UL << 31;
129 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
136 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
137 mm->task_size = mm->context.asce_limit;
138 crst_table_free(mm, (unsigned long *) pgd);
140 if (current->active_mm == mm)
148 * gmap_alloc - allocate a guest address space
149 * @mm: pointer to the parent mm_struct
150 * @limit: maximum size of the gmap address space
152 * Returns a guest address space structure.
154 struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
158 unsigned long *table;
159 unsigned long etype, atype;
161 if (limit < (1UL << 31)) {
162 limit = (1UL << 31) - 1;
163 atype = _ASCE_TYPE_SEGMENT;
164 etype = _SEGMENT_ENTRY_EMPTY;
165 } else if (limit < (1UL << 42)) {
166 limit = (1UL << 42) - 1;
167 atype = _ASCE_TYPE_REGION3;
168 etype = _REGION3_ENTRY_EMPTY;
169 } else if (limit < (1UL << 53)) {
170 limit = (1UL << 53) - 1;
171 atype = _ASCE_TYPE_REGION2;
172 etype = _REGION2_ENTRY_EMPTY;
175 atype = _ASCE_TYPE_REGION1;
176 etype = _REGION1_ENTRY_EMPTY;
178 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
181 INIT_LIST_HEAD(&gmap->crst_list);
182 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
183 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
184 spin_lock_init(&gmap->guest_table_lock);
186 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
190 list_add(&page->lru, &gmap->crst_list);
191 table = (unsigned long *) page_to_phys(page);
192 crst_table_init(table, etype);
194 gmap->asce = atype | _ASCE_TABLE_LENGTH |
195 _ASCE_USER_BITS | __pa(table);
196 gmap->asce_end = limit;
197 down_write(&mm->mmap_sem);
198 list_add(&gmap->list, &mm->context.gmap_list);
199 up_write(&mm->mmap_sem);
207 EXPORT_SYMBOL_GPL(gmap_alloc);
209 static void gmap_flush_tlb(struct gmap *gmap)
211 if (MACHINE_HAS_IDTE)
212 __tlb_flush_asce(gmap->mm, gmap->asce);
214 __tlb_flush_global();
217 static void gmap_radix_tree_free(struct radix_tree_root *root)
219 struct radix_tree_iter iter;
220 unsigned long indices[16];
225 /* A radix tree is freed by deleting all of its entries */
229 radix_tree_for_each_slot(slot, root, &iter, index) {
230 indices[nr] = iter.index;
234 for (i = 0; i < nr; i++) {
236 radix_tree_delete(root, index);
242 * gmap_free - free a guest address space
243 * @gmap: pointer to the guest address space structure
245 void gmap_free(struct gmap *gmap)
247 struct page *page, *next;
250 if (MACHINE_HAS_IDTE)
251 __tlb_flush_asce(gmap->mm, gmap->asce);
253 __tlb_flush_global();
255 /* Free all segment & region tables. */
256 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
257 __free_pages(page, ALLOC_ORDER);
258 gmap_radix_tree_free(&gmap->guest_to_host);
259 gmap_radix_tree_free(&gmap->host_to_guest);
260 down_write(&gmap->mm->mmap_sem);
261 list_del(&gmap->list);
262 up_write(&gmap->mm->mmap_sem);
265 EXPORT_SYMBOL_GPL(gmap_free);
268 * gmap_enable - switch primary space to the guest address space
269 * @gmap: pointer to the guest address space structure
271 void gmap_enable(struct gmap *gmap)
273 S390_lowcore.gmap = (unsigned long) gmap;
275 EXPORT_SYMBOL_GPL(gmap_enable);
278 * gmap_disable - switch back to the standard primary address space
279 * @gmap: pointer to the guest address space structure
281 void gmap_disable(struct gmap *gmap)
283 S390_lowcore.gmap = 0UL;
285 EXPORT_SYMBOL_GPL(gmap_disable);
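/*
 * Example (sketch): typical lifecycle of a guest address space as a KVM-like
 * caller would use it.  The function name and the 4 TB limit are illustrative,
 * and the sketch assumes gmap_alloc() returns NULL on failure.
 */
#if 0
static void example_gmap_lifecycle(void)
{
	struct gmap *gmap;

	gmap = gmap_alloc(current->mm, (1UL << 42) - 1);
	if (!gmap)
		return;
	gmap_enable(gmap);	/* use the gmap ASCE on this cpu (before SIE) */
	/* ... run the guest ... */
	gmap_disable(gmap);	/* back to the standard ASCE */
	gmap_free(gmap);
}
#endif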
288 * gmap_alloc_table is assumed to be called with mmap_sem held
290 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
291 unsigned long init, unsigned long gaddr)
296 /* since we don't free the gmap table until gmap_free we can unlock */
297 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
300 new = (unsigned long *) page_to_phys(page);
301 crst_table_init(new, init);
302 spin_lock(&gmap->mm->page_table_lock);
303 if (*table & _REGION_ENTRY_INVALID) {
304 list_add(&page->lru, &gmap->crst_list);
305 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
306 (*table & _REGION_ENTRY_TYPE_MASK);
310 spin_unlock(&gmap->mm->page_table_lock);
312 __free_pages(page, ALLOC_ORDER);
317 * __gmap_segment_gaddr - find virtual address from segment pointer
318 * @entry: pointer to a segment table entry in the guest address space
320 * Returns the virtual address in the guest address space for the segment
322 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
325 unsigned long offset, mask;
327 offset = (unsigned long) entry / sizeof(unsigned long);
328 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
329 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
330 page = virt_to_page((void *)((unsigned long) entry & mask));
331 return page->index + offset;
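/*
 * Worked example: a segment table is a 16K block of 2048 eight-byte entries
 * and page->index holds the guest address covered by entry 0 (set in
 * gmap_alloc_table()).  For an entry at byte offset 0x40 within the block,
 * offset = (0x40 / 8) * PMD_SIZE = 8 MB, so the function returns
 * page->index + 0x800000.
 */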
335 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
336 * @gmap: pointer to the guest address space structure
337 * @vmaddr: address in the host process address space
339 * Returns 1 if a TLB flush is required
341 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
343 unsigned long *entry;
346 spin_lock(&gmap->guest_table_lock);
347 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
349 flush = (*entry != _SEGMENT_ENTRY_INVALID);
350 *entry = _SEGMENT_ENTRY_INVALID;
352 spin_unlock(&gmap->guest_table_lock);
357 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
358 * @gmap: pointer to the guest address space structure
359 * @gaddr: address in the guest address space
361 * Returns 1 if a TLB flush is required
363 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
365 unsigned long vmaddr;
367 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
369 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
373 * gmap_unmap_segment - unmap segment from the guest address space
374 * @gmap: pointer to the guest address space structure
375 * @to: address in the guest address space
376 * @len: length of the memory area to unmap
378 * Returns 0 if the unmap succeeded, -EINVAL if not.
380 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
385 if ((to | len) & (PMD_SIZE - 1))
387 if (len == 0 || to + len < to)
391 down_write(&gmap->mm->mmap_sem);
392 for (off = 0; off < len; off += PMD_SIZE)
393 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
394 up_write(&gmap->mm->mmap_sem);
396 gmap_flush_tlb(gmap);
399 EXPORT_SYMBOL_GPL(gmap_unmap_segment);
402 * gmap_map_segment - map a segment to the guest address space
403 * @gmap: pointer to the guest address space structure
404 * @from: source address in the parent address space
405 * @to: target address in the guest address space
406 * @len: length of the memory area to map
408 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
410 int gmap_map_segment(struct gmap *gmap, unsigned long from,
411 unsigned long to, unsigned long len)
416 if ((from | to | len) & (PMD_SIZE - 1))
418 if (len == 0 || from + len < from || to + len < to ||
419 from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
423 down_write(&gmap->mm->mmap_sem);
424 for (off = 0; off < len; off += PMD_SIZE) {
425 /* Remove old translation */
426 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
427 /* Store new translation */
428 if (radix_tree_insert(&gmap->guest_to_host,
429 (to + off) >> PMD_SHIFT,
430 (void *) from + off))
433 up_write(&gmap->mm->mmap_sem);
435 gmap_flush_tlb(gmap);
438 gmap_unmap_segment(gmap, to, len);
441 EXPORT_SYMBOL_GPL(gmap_map_segment);
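/*
 * Example (sketch): make 8 MB of the parent address space, starting at the
 * PMD_SIZE aligned host address "from", appear at guest address 0 and tear
 * the mapping down again.  The helper name and addresses are illustrative.
 */
#if 0
static int example_gmap_map(struct gmap *gmap, unsigned long from)
{
	int rc;

	rc = gmap_map_segment(gmap, from, 0UL, 8UL << 20);
	if (rc)
		return rc;
	/* ... let the guest use the mapping ... */
	return gmap_unmap_segment(gmap, 0UL, 8UL << 20);
}
#endif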
444 * __gmap_translate - translate a guest address to a user space address
445 * @gmap: pointer to guest mapping meta data structure
446 * @gaddr: guest address
448 * Returns user space address which corresponds to the guest address or
449 * -EFAULT if no such mapping exists.
450 * This function does not establish potentially missing page table entries.
451 * The mmap_sem of the mm that belongs to the address space must be held
452 * when this function gets called.
454 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
456 unsigned long vmaddr;
458 vmaddr = (unsigned long)
459 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
460 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
462 EXPORT_SYMBOL_GPL(__gmap_translate);
465 * gmap_translate - translate a guest address to a user space address
466 * @gmap: pointer to guest mapping meta data structure
467 * @gaddr: guest address
469 * Returns user space address which corresponds to the guest address or
470 * -EFAULT if no such mapping exists.
471 * This function does not establish potentially missing page table entries.
473 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
477 down_read(&gmap->mm->mmap_sem);
478 rc = __gmap_translate(gmap, gaddr);
479 up_read(&gmap->mm->mmap_sem);
482 EXPORT_SYMBOL_GPL(gmap_translate);
485 * gmap_unlink - disconnect a page table from the gmap shadow tables
486 * @mm: pointer to the parent mm_struct
487 * @table: pointer to the host page table
488 * @vmaddr: vm address associated with the host page table
490 static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
491 unsigned long vmaddr)
496 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
497 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
499 gmap_flush_tlb(gmap);
504 * __gmap_link - set up shadow page tables to connect a host to a guest address
505 * @gmap: pointer to guest mapping meta data structure
506 * @gaddr: guest address
507 * @vmaddr: vm address
509 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
510 * if the vm address is already mapped to a different guest segment.
511 * The mmap_sem of the mm that belongs to the address space must be held
512 * when this function gets called.
514 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
516 struct mm_struct *mm;
517 unsigned long *table;
524 /* Create higher level tables in the gmap page table */
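/*
 * For orientation: each level below indexes a 2048-entry table.  Guest
 * address bits 63-53 select the region-first entry, bits 52-42 the
 * region-second entry, bits 41-31 the region-third entry and bits 30-20
 * the segment entry; levels above the ASCE type of this gmap are skipped.
 */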
526 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
527 table += (gaddr >> 53) & 0x7ff;
528 if ((*table & _REGION_ENTRY_INVALID) &&
529 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
530 gaddr & 0xffe0000000000000UL))
532 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
534 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
535 table += (gaddr >> 42) & 0x7ff;
536 if ((*table & _REGION_ENTRY_INVALID) &&
537 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
538 gaddr & 0xfffffc0000000000UL))
540 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
542 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
543 table += (gaddr >> 31) & 0x7ff;
544 if ((*table & _REGION_ENTRY_INVALID) &&
545 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
546 gaddr & 0xffffffff80000000UL))
548 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
550 table += (gaddr >> 20) & 0x7ff;
551 /* Walk the parent mm page table */
553 pgd = pgd_offset(mm, vmaddr);
554 VM_BUG_ON(pgd_none(*pgd));
555 pud = pud_offset(pgd, vmaddr);
556 VM_BUG_ON(pud_none(*pud));
557 pmd = pmd_offset(pud, vmaddr);
558 VM_BUG_ON(pmd_none(*pmd));
559 /* large pmds cannot yet be handled */
562 /* Link gmap segment table entry location to page table. */
563 rc = radix_tree_preload(GFP_KERNEL);
566 ptl = pmd_lock(mm, pmd);
567 spin_lock(&gmap->guest_table_lock);
568 if (*table == _SEGMENT_ENTRY_INVALID) {
569 rc = radix_tree_insert(&gmap->host_to_guest,
570 vmaddr >> PMD_SHIFT, table);
572 *table = pmd_val(*pmd);
575 spin_unlock(&gmap->guest_table_lock);
577 radix_tree_preload_end();
582 * gmap_fault - resolve a fault on a guest address
583 * @gmap: pointer to guest mapping meta data structure
584 * @gaddr: guest address
585 * @fault_flags: flags to pass down to handle_mm_fault()
587 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
588 * if the guest address is not mapped or the host page cannot be resolved.
590 int gmap_fault(struct gmap *gmap, unsigned long gaddr,
591 unsigned int fault_flags)
593 unsigned long vmaddr;
596 down_read(&gmap->mm->mmap_sem);
597 vmaddr = __gmap_translate(gmap, gaddr);
598 if (IS_ERR_VALUE(vmaddr)) {
602 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
606 rc = __gmap_link(gmap, gaddr, vmaddr);
608 up_read(&gmap->mm->mmap_sem);
611 EXPORT_SYMBOL_GPL(gmap_fault);
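/*
 * Example (sketch): make a guest page accessible and look up the user space
 * address that backs it.  The helper name is illustrative and error handling
 * is abbreviated.
 */
#if 0
static unsigned long example_resolve_guest_page(struct gmap *gmap,
						unsigned long gaddr)
{
	unsigned long vmaddr;

	if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
		return -EFAULT;
	vmaddr = gmap_translate(gmap, gaddr);
	return IS_ERR_VALUE(vmaddr) ? -EFAULT : vmaddr;
}
#endif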
613 static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
615 if (!non_swap_entry(entry))
616 dec_mm_counter(mm, MM_SWAPENTS);
617 else if (is_migration_entry(entry)) {
618 struct page *page = migration_entry_to_page(entry);
621 dec_mm_counter(mm, MM_ANONPAGES);
623 dec_mm_counter(mm, MM_FILEPAGES);
625 free_swap_and_cache(entry);
629 * this function is assumed to be called with mmap_sem held
631 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
633 unsigned long vmaddr, ptev, pgstev;
638 /* Find the vm address for the guest address */
639 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
643 vmaddr |= gaddr & ~PMD_MASK;
644 /* Get pointer to the page table entry */
645 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
651 /* Zap unused and logically-zero pages */
652 pgste = pgste_get_lock(ptep);
653 pgstev = pgste_val(pgste);
655 if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
656 ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
657 gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
658 pte_clear(gmap->mm, vmaddr, ptep);
660 pgste_set_unlock(ptep, pgste);
662 pte_unmap_unlock(ptep, ptl);
664 EXPORT_SYMBOL_GPL(__gmap_zap);
666 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
668 unsigned long gaddr, vmaddr, size;
669 struct vm_area_struct *vma;
671 down_read(&gmap->mm->mmap_sem);
672 for (gaddr = from; gaddr < to;
673 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
674 /* Find the vm address for the guest address */
675 vmaddr = (unsigned long)
676 radix_tree_lookup(&gmap->guest_to_host,
680 vmaddr |= gaddr & ~PMD_MASK;
681 /* Find vma in the parent mm */
682 vma = find_vma(gmap->mm, vmaddr);
683 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
684 zap_page_range(vma, vmaddr, size, NULL);
686 up_read(&gmap->mm->mmap_sem);
688 EXPORT_SYMBOL_GPL(gmap_discard);
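/*
 * Example (sketch): drop the backing of a single guest segment, e.g. after
 * the guest has signalled that it no longer needs the memory.  The helper
 * name is illustrative.
 */
#if 0
static void example_discard_segment(struct gmap *gmap, unsigned long gaddr)
{
	gmap_discard(gmap, gaddr & PMD_MASK, (gaddr & PMD_MASK) + PMD_SIZE);
}
#endif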
690 static LIST_HEAD(gmap_notifier_list);
691 static DEFINE_SPINLOCK(gmap_notifier_lock);
694 * gmap_register_ipte_notifier - register a pte invalidation callback
695 * @nb: pointer to the gmap notifier block
697 void gmap_register_ipte_notifier(struct gmap_notifier *nb)
699 spin_lock(&gmap_notifier_lock);
700 list_add(&nb->list, &gmap_notifier_list);
701 spin_unlock(&gmap_notifier_lock);
703 EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
706 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
707 * @nb: pointer to the gmap notifier block
709 void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
711 spin_lock(&gmap_notifier_lock);
712 list_del_init(&nb->list);
713 spin_unlock(&gmap_notifier_lock);
715 EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
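/*
 * Example (sketch): a minimal invalidation notifier.  The handler and
 * variable names are illustrative; the callback runs under
 * gmap_notifier_lock, see gmap_do_ipte_notify() below.
 */
#if 0
static void example_pte_notifier(struct gmap *gmap, unsigned long gaddr)
{
	/* e.g. drop shadow state for this guest page / kick the vcpu */
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_pte_notifier,
};

static void example_register_notifier(void)
{
	gmap_register_ipte_notifier(&example_notifier);
}
#endif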
718 * gmap_ipte_notify - mark a range of ptes for invalidation notification
719 * @gmap: pointer to guest mapping meta data structure
720 * @gaddr: virtual address in the guest address space
723 * Returns 0 if for each page in the given range a gmap mapping exists and
724 * the invalidation notification could be set. If the gmap mapping is missing
725 * for one or more pages -EFAULT is returned. If no memory could be allocated
726 * -ENOMEM is returned. This function establishes missing page table entries.
728 int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
736 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
738 down_read(&gmap->mm->mmap_sem);
740 /* Convert gmap address and connect the page tables */
741 addr = __gmap_translate(gmap, gaddr);
742 if (IS_ERR_VALUE(addr)) {
746 /* Get the page mapped */
747 if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
751 rc = __gmap_link(gmap, gaddr, addr);
754 /* Walk the process page table, lock and get pte pointer */
755 ptep = get_locked_pte(gmap->mm, addr, &ptl);
757 /* Set notification bit in the pgste of the pte */
759 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
760 pgste = pgste_get_lock(ptep);
761 pgste_val(pgste) |= PGSTE_IN_BIT;
762 pgste_set_unlock(ptep, pgste);
766 pte_unmap_unlock(ptep, ptl);
768 up_read(&gmap->mm->mmap_sem);
771 EXPORT_SYMBOL_GPL(gmap_ipte_notify);
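/*
 * Example (sketch): arm invalidation notification for one guest page before
 * shadowing its contents; the registered notifier then fires when the host
 * mapping of that page changes.  The helper name is illustrative.
 */
#if 0
static int example_watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_ipte_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE);
}
#endif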
774 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
775 * @mm: pointer to the process mm_struct
776 * @addr: virtual address in the process address space
777 * @pte: pointer to the page table entry
779 * This function is assumed to be called with the page table lock held
780 * for the pte to notify.
782 void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
784 unsigned long offset, gaddr;
785 unsigned long *table;
786 struct gmap_notifier *nb;
789 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
790 offset = offset * (4096 / sizeof(pte_t));
791 spin_lock(&gmap_notifier_lock);
792 list_for_each_entry(gmap, &mm->context.gmap_list, list) {
793 table = radix_tree_lookup(&gmap->host_to_guest,
794 vmaddr >> PMD_SHIFT);
797 gaddr = __gmap_segment_gaddr(table) + offset;
798 list_for_each_entry(nb, &gmap_notifier_list, list)
799 nb->notifier_call(gmap, gaddr);
801 spin_unlock(&gmap_notifier_lock);
803 EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
805 static inline int page_table_with_pgste(struct page *page)
807 return atomic_read(&page->_mapcount) == 0;
810 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
813 unsigned long *table;
815 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
818 if (!pgtable_page_ctor(page)) {
822 atomic_set(&page->_mapcount, 0);
823 table = (unsigned long *) page_to_phys(page);
824 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
825 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
829 static inline void page_table_free_pgste(unsigned long *table)
833 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
834 pgtable_page_dtor(page);
835 atomic_set(&page->_mapcount, -1);
839 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
840 unsigned long key, bool nq)
846 down_read(&mm->mmap_sem);
848 ptep = get_locked_pte(mm, addr, &ptl);
849 if (unlikely(!ptep)) {
850 up_read(&mm->mmap_sem);
853 if (!(pte_val(*ptep) & _PAGE_INVALID) &&
854 (pte_val(*ptep) & _PAGE_PROTECT)) {
855 pte_unmap_unlock(ptep, ptl);
856 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
857 up_read(&mm->mmap_sem);
863 new = old = pgste_get_lock(ptep);
864 pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
865 PGSTE_ACC_BITS | PGSTE_FP_BIT);
866 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
867 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
868 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
869 unsigned long address, bits, skey;
871 address = pte_val(*ptep) & PAGE_MASK;
872 skey = (unsigned long) page_get_storage_key(address);
873 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
874 skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
875 /* Set storage key ACC and FP */
876 page_set_storage_key(address, skey, !nq);
877 /* Merge host changed & referenced into pgste */
878 pgste_val(new) |= bits << 52;
880 /* changing the guest storage key is considered a change of the page */
881 if ((pgste_val(new) ^ pgste_val(old)) &
882 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
883 pgste_val(new) |= PGSTE_UC_BIT;
885 pgste_set_unlock(ptep, new);
886 pte_unmap_unlock(ptep, ptl);
887 up_read(&mm->mmap_sem);
890 EXPORT_SYMBOL(set_guest_storage_key);
892 unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
898 unsigned long key = 0;
900 down_read(&mm->mmap_sem);
901 ptep = get_locked_pte(mm, addr, &ptl);
902 if (unlikely(!ptep)) {
903 up_read(&mm->mmap_sem);
906 pgste = pgste_get_lock(ptep);
908 if (pte_val(*ptep) & _PAGE_INVALID) {
909 key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
910 key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
911 key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
912 key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
914 physaddr = pte_val(*ptep) & PAGE_MASK;
915 key = page_get_storage_key(physaddr);
917 /* Reflect guest's logical view, not physical */
918 if (pgste_val(pgste) & PGSTE_GR_BIT)
919 key |= _PAGE_REFERENCED;
920 if (pgste_val(pgste) & PGSTE_GC_BIT)
921 key |= _PAGE_CHANGED;
924 pgste_set_unlock(ptep, pgste);
925 pte_unmap_unlock(ptep, ptl);
926 up_read(&mm->mmap_sem);
929 EXPORT_SYMBOL(get_guest_storage_key);
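/*
 * Example (sketch): copy the guest visible storage key of one page to
 * another page in the same mm.  The helper name is illustrative and error
 * handling is omitted; get and set use the same key layout, so the value
 * can be passed through unchanged.
 */
#if 0
static int example_copy_storage_key(struct mm_struct *mm, unsigned long dst,
				    unsigned long src)
{
	unsigned long key = get_guest_storage_key(mm, src);

	return set_guest_storage_key(mm, dst, key, false);
}
#endif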
931 #else /* CONFIG_PGSTE */
933 static inline int page_table_with_pgste(struct page *page)
938 static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
943 static inline void page_table_free_pgste(unsigned long *table)
947 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
948 unsigned long vmaddr)
952 #endif /* CONFIG_PGSTE */
954 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
956 unsigned int old, new;
959 old = atomic_read(v);
961 } while (atomic_cmpxchg(v, old, new) != old);
966 * page table entry allocation/free routines.
968 unsigned long *page_table_alloc(struct mm_struct *mm)
970 unsigned long *uninitialized_var(table);
971 struct page *uninitialized_var(page);
972 unsigned int mask, bit;
974 if (mm_has_pgste(mm))
975 return page_table_alloc_pgste(mm);
976 /* Allocate fragments of a 4K page as 1K/2K page table */
977 spin_lock_bh(&mm->context.list_lock);
979 if (!list_empty(&mm->context.pgtable_list)) {
980 page = list_first_entry(&mm->context.pgtable_list,
982 table = (unsigned long *) page_to_phys(page);
983 mask = atomic_read(&page->_mapcount);
984 mask = mask | (mask >> 4);
986 if ((mask & FRAG_MASK) == FRAG_MASK) {
987 spin_unlock_bh(&mm->context.list_lock);
988 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
991 if (!pgtable_page_ctor(page)) {
995 atomic_set(&page->_mapcount, 1);
996 table = (unsigned long *) page_to_phys(page);
997 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
998 spin_lock_bh(&mm->context.list_lock);
999 list_add(&page->lru, &mm->context.pgtable_list);
1001 for (bit = 1; mask & bit; bit <<= 1)
1002 table += PTRS_PER_PTE;
1003 mask = atomic_xor_bits(&page->_mapcount, bit);
1004 if ((mask & FRAG_MASK) == FRAG_MASK)
1005 list_del(&page->lru);
1007 spin_unlock_bh(&mm->context.list_lock);
1011 void page_table_free(struct mm_struct *mm, unsigned long *table)
1014 unsigned int bit, mask;
1016 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1017 if (page_table_with_pgste(page))
1018 return page_table_free_pgste(table);
1019 /* Free 1K/2K page table fragment of a 4K page */
1020 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
1021 spin_lock_bh(&mm->context.list_lock);
1022 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1023 list_del(&page->lru);
1024 mask = atomic_xor_bits(&page->_mapcount, bit);
1025 if (mask & FRAG_MASK)
1026 list_add(&page->lru, &mm->context.pgtable_list);
1027 spin_unlock_bh(&mm->context.list_lock);
1029 pgtable_page_dtor(page);
1030 atomic_set(&page->_mapcount, -1);
1035 static void __page_table_free_rcu(void *table, unsigned bit)
1039 if (bit == FRAG_MASK)
1040 return page_table_free_pgste(table);
1041 /* Free 1K/2K page table fragment of a 4K page */
1042 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1043 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
1044 pgtable_page_dtor(page);
1045 atomic_set(&page->_mapcount, -1);
1050 void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
1051 unsigned long vmaddr)
1053 struct mm_struct *mm;
1055 unsigned int bit, mask;
1058 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1059 if (page_table_with_pgste(page)) {
1060 gmap_unlink(mm, table, vmaddr);
1061 table = (unsigned long *) (__pa(table) | FRAG_MASK);
1062 tlb_remove_table(tlb, table);
1065 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
1066 spin_lock_bh(&mm->context.list_lock);
1067 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1068 list_del(&page->lru);
1069 mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
1070 if (mask & FRAG_MASK)
1071 list_add_tail(&page->lru, &mm->context.pgtable_list);
1072 spin_unlock_bh(&mm->context.list_lock);
1073 table = (unsigned long *) (__pa(table) | (bit << 4));
1074 tlb_remove_table(tlb, table);
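/*
 * For orientation: the lower FRAG_MASK bits of page->_mapcount track which
 * 1K/2K fragments of a page table page are in use, the same bits shifted
 * left by four mark fragments with a pending RCU free (page_table_alloc()
 * checks "mask | (mask >> 4)", so such fragments are not reused).  The type
 * encoded into the low pointer bits handed to tlb_remove_table() above lets
 * __tlb_remove_table() below tell pgste tables (FRAG_MASK), page table
 * fragments (bit << 4) and full crst tables (0) apart.
 */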
1077 static void __tlb_remove_table(void *_table)
1079 const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
1080 void *table = (void *)((unsigned long) _table & ~mask);
1081 unsigned type = (unsigned long) _table & mask;
1084 __page_table_free_rcu(table, type);
1086 free_pages((unsigned long) table, ALLOC_ORDER);
1089 static void tlb_remove_table_smp_sync(void *arg)
1091 /* Simply deliver the interrupt */
1094 static void tlb_remove_table_one(void *table)
1097 * This isn't an RCU grace period and hence the page-tables cannot be
1098 * assumed to be actually RCU-freed.
1100 * It is however sufficient for software page-table walkers that rely
1101 * on IRQ disabling. See the comment near struct mmu_table_batch.
1103 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
1104 __tlb_remove_table(table);
1107 static void tlb_remove_table_rcu(struct rcu_head *head)
1109 struct mmu_table_batch *batch;
1112 batch = container_of(head, struct mmu_table_batch, rcu);
1114 for (i = 0; i < batch->nr; i++)
1115 __tlb_remove_table(batch->tables[i]);
1117 free_page((unsigned long)batch);
1120 void tlb_table_flush(struct mmu_gather *tlb)
1122 struct mmu_table_batch **batch = &tlb->batch;
1125 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1130 void tlb_remove_table(struct mmu_gather *tlb, void *table)
1132 struct mmu_table_batch **batch = &tlb->batch;
1134 tlb->mm->context.flush_mm = 1;
1135 if (*batch == NULL) {
1136 *batch = (struct mmu_table_batch *)
1137 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1138 if (*batch == NULL) {
1139 __tlb_flush_mm_lazy(tlb->mm);
1140 tlb_remove_table_one(table);
1145 (*batch)->tables[(*batch)->nr++] = table;
1146 if ((*batch)->nr == MAX_TABLE_BATCH)
1150 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1151 static inline void thp_split_vma(struct vm_area_struct *vma)
1155 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1156 follow_page(vma, addr, FOLL_SPLIT);
1159 static inline void thp_split_mm(struct mm_struct *mm)
1161 struct vm_area_struct *vma;
1163 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1165 vma->vm_flags &= ~VM_HUGEPAGE;
1166 vma->vm_flags |= VM_NOHUGEPAGE;
1168 mm->def_flags |= VM_NOHUGEPAGE;
1171 static inline void thp_split_mm(struct mm_struct *mm)
1174 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1176 static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
1177 struct mm_struct *mm, pud_t *pud,
1178 unsigned long addr, unsigned long end)
1180 unsigned long next, *table, *new;
1185 pmd = pmd_offset(pud, addr);
1187 next = pmd_addr_end(addr, end);
1189 if (pmd_none_or_clear_bad(pmd))
1191 table = (unsigned long *) pmd_deref(*pmd);
1192 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1193 if (page_table_with_pgste(page))
1195 /* Allocate new page table with pgstes */
1196 new = page_table_alloc_pgste(mm);
1200 ptl = pmd_lock(mm, pmd);
1201 if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1202 /* Nuke pmd entry pointing to the "short" page table */
1203 pmdp_flush_lazy(mm, addr, pmd);
1205 /* Copy ptes from old table to new table */
1206 memcpy(new, table, PAGE_SIZE/2);
1207 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1208 /* Establish new table */
1209 pmd_populate(mm, pmd, (pte_t *) new);
1210 /* Free old table with rcu, there might be a walker! */
1211 page_table_free_rcu(tlb, table, addr);
1216 page_table_free_pgste(new);
1219 } while (pmd++, addr = next, addr != end);
1224 static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1225 struct mm_struct *mm, pgd_t *pgd,
1226 unsigned long addr, unsigned long end)
1231 pud = pud_offset(pgd, addr);
1233 next = pud_addr_end(addr, end);
1234 if (pud_none_or_clear_bad(pud))
1236 next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
1237 if (unlikely(IS_ERR_VALUE(next)))
1239 } while (pud++, addr = next, addr != end);
1244 static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1245 unsigned long addr, unsigned long end)
1250 pgd = pgd_offset(mm, addr);
1252 next = pgd_addr_end(addr, end);
1253 if (pgd_none_or_clear_bad(pgd))
1255 next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
1256 if (unlikely(IS_ERR_VALUE(next)))
1258 } while (pgd++, addr = next, addr != end);
1264 * Switch on pgstes for the current userspace process (for KVM).
1266 int s390_enable_sie(void)
1268 struct task_struct *tsk = current;
1269 struct mm_struct *mm = tsk->mm;
1270 struct mmu_gather tlb;
1272 /* Do we have pgstes? If yes, we are done */
1273 if (mm_has_pgste(tsk->mm))
1276 down_write(&mm->mmap_sem);
1277 /* split thp mappings and disable thp for future mappings */
1279 /* Reallocate the page tables with pgstes */
1280 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
1281 if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
1282 mm->context.has_pgste = 1;
1283 tlb_finish_mmu(&tlb, 0, TASK_SIZE);
1284 up_write(&mm->mmap_sem);
1285 return mm->context.has_pgste ? 0 : -ENOMEM;
1287 EXPORT_SYMBOL_GPL(s390_enable_sie);
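/*
 * Example (sketch): the usual order when preparing a process to run a guest:
 * first convert its page tables to the pgste format, then create the guest
 * address space.  The function name and limit parameter are illustrative.
 */
#if 0
static struct gmap *example_prepare_guest(unsigned long mem_limit)
{
	if (s390_enable_sie())
		return NULL;
	return gmap_alloc(current->mm, mem_limit);
}
#endif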
1290 * Enable storage key handling from now on and initialize the storage
1291 * keys with the default key.
1293 static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1294 unsigned long next, struct mm_walk *walk)
1299 pgste = pgste_get_lock(pte);
1301 * Remove all zero page mappings; after establishing a policy to forbid
1302 * zero page mappings, subsequent faults for these pages will get fresh
1303 * anonymous pages.
1305 if (is_zero_pfn(pte_pfn(*pte))) {
1306 ptep_flush_direct(walk->mm, addr, pte);
1307 pte_val(*pte) = _PAGE_INVALID;
1309 /* Clear storage key */
1310 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1311 PGSTE_GR_BIT | PGSTE_GC_BIT);
1312 ptev = pte_val(*pte);
1313 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1314 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1315 pgste_set_unlock(pte, pgste);
1319 int s390_enable_skey(void)
1321 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1322 struct mm_struct *mm = current->mm;
1323 struct vm_area_struct *vma;
1326 down_write(&mm->mmap_sem);
1327 if (mm_use_skey(mm))
1330 mm->context.use_skey = 1;
1331 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1332 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1333 MADV_UNMERGEABLE, &vma->vm_flags)) {
1334 mm->context.use_skey = 0;
1339 mm->def_flags &= ~VM_MERGEABLE;
1342 walk_page_range(0, TASK_SIZE, &walk);
1345 up_write(&mm->mmap_sem);
1348 EXPORT_SYMBOL_GPL(s390_enable_skey);
1351 * Reset CMMA state, make all pages stable again.
1353 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1354 unsigned long next, struct mm_walk *walk)
1358 pgste = pgste_get_lock(pte);
1359 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1360 pgste_set_unlock(pte, pgste);
1364 void s390_reset_cmma(struct mm_struct *mm)
1366 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1368 down_write(&mm->mmap_sem);
1370 walk_page_range(0, TASK_SIZE, &walk);
1371 up_write(&mm->mmap_sem);
1373 EXPORT_SYMBOL_GPL(s390_reset_cmma);
1376 * Test whether a guest page is dirty and reset (clear) its dirty state.
1378 bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
1384 pte = get_locked_pte(gmap->mm, address, &ptl);
1388 if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
1394 EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
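/*
 * Example (sketch): harvest the dirty state of the host pages that back a
 * chunk of guest memory into a caller supplied bitmap, e.g. for dirty
 * logging.  "hva" is the host virtual address of the first page; the helper
 * and parameter names are illustrative.
 */
#if 0
static void example_collect_dirty(struct gmap *gmap, unsigned long hva,
				  unsigned long npages, unsigned long *bitmap)
{
	unsigned long i;

	for (i = 0; i < npages; i++)
		if (gmap_test_and_clear_dirty(hva + i * PAGE_SIZE, gmap))
			set_bit(i, bitmap);
}
#endif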
1396 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1397 int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
1400 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1401 /* No need to flush the TLB; on s390 the reference bits are kept in the
1402 * storage key and never in the TLB. */
1403 return pmdp_test_and_clear_young(vma, address, pmdp);
1406 int pmdp_set_access_flags(struct vm_area_struct *vma,
1407 unsigned long address, pmd_t *pmdp,
1408 pmd_t entry, int dirty)
1410 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1412 entry = pmd_mkyoung(entry);
1414 entry = pmd_mkdirty(entry);
1415 if (pmd_same(*pmdp, entry))
1417 pmdp_invalidate(vma, address, pmdp);
1418 set_pmd_at(vma->vm_mm, address, pmdp, entry);
1422 static void pmdp_splitting_flush_sync(void *arg)
1424 /* Simply deliver the interrupt */
1427 void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
1430 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1431 if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
1432 (unsigned long *) pmdp)) {
1433 /* need to serialize against gup-fast (IRQ disabled) */
1434 smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
1438 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1441 struct list_head *lh = (struct list_head *) pgtable;
1443 assert_spin_locked(pmd_lockptr(mm, pmdp));
1446 if (!pmd_huge_pte(mm, pmdp))
1449 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1450 pmd_huge_pte(mm, pmdp) = pgtable;
1453 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1455 struct list_head *lh;
1459 assert_spin_locked(pmd_lockptr(mm, pmdp));
1462 pgtable = pmd_huge_pte(mm, pmdp);
1463 lh = (struct list_head *) pgtable;
1465 pmd_huge_pte(mm, pmdp) = NULL;
1467 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1470 ptep = (pte_t *) pgtable;
1471 pte_val(*ptep) = _PAGE_INVALID;
1473 pte_val(*ptep) = _PAGE_INVALID;
1476 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */