/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
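
/*
 * The protection maps hang off mm->context.spt.  For reference, the
 * subpage_prot_table (defined in asm/mmu-hash64.h) looks roughly like
 * the sketch below; low_prot[] covers addresses below 4GB directly,
 * and protptrs[] is the root of a two-level tree for the rest of the
 * user address space:
 *
 *	struct subpage_prot_table {
 *		unsigned long maxaddr;		- only addresses below this are mapped
 *		unsigned int **protptrs[2];	- two-level tree above 4GB
 *		unsigned int *low_prot[4];	- one page of u32 maps per GB below 4GB
 *	};
 */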

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}

static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	/* a no-op pte_update() still flushes any existing HPTE for the page */
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
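/*
 * For example, with the encoding above, a map word of 0x55555555 (every
 * 2-bit field set to 1) write-protects all sixteen 4k subpages of a 64k
 * page, 0xaaaaaaaa (every field set to 2) prevents all access to them,
 * and 0 falls back to whatever the pte permissions allow.
 */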
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/* demote this segment to 4k pages so subpage protection applies */
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/* drop mmap_sem while copying; the copy may fault */
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
 out2:
	return err;
}
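
/*
 * Illustrative userspace sketch (an assumption for illustration, not part
 * of this file): on powerpc this is exposed as the subpage_prot syscall
 * (__NR_subpage_prot in asm/unistd.h), so a process using 64k pages could
 * write-protect one 64k page at a page-aligned "addr" with something like:
 *
 *	u32 map = 0x55555555;	- 1 in each 2-bit field: no writes
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *	...
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, NULL);	- clear again
 */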