/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"
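
/*
 * Queue a host mmap for the given virtual range, coalescing it with the
 * previous op when the two describe adjacent regions of the same file with
 * the same protection.  If the op array is already full, it is flushed to
 * the host via do_ops() before the new op is recorded.
 */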
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
				.u = { .mmap = { .addr = virt, .len = len,
						 .prot = prot, .fd = fd,
						 .offset = offset } } });
	return ret;
}
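
/*
 * Queue a host munmap, merging with a preceding munmap of the adjacent
 * range when possible.
 */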
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
				.u = { .munmap = { .addr = addr,
						   .len = len } } });
	return ret;
}
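
/*
 * Queue a host mprotect, merging with a preceding mprotect of the adjacent
 * range that requests the same protection.
 */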
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
				.u = { .mprotect = { .addr = addr,
						     .len = len,
						     .prot = prot } } });
	return ret;
}
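
/*
 * Advance n to the next multiple of inc that is strictly greater than n;
 * inc must be a power of two.
 */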
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
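
/*
 * Walk the PTEs covering [addr, end), turning each one that has changed
 * into an mmap, munmap, or mprotect op with protections derived from the
 * young and dirty bits.  Each PTE is marked up to date once its op is
 * queued.
 */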
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, ops, op_index,
					       last_op, mmu, flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
					   last_op, mmu, flush, do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}
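
/*
 * Walk a range of a process address space, collecting the host operations
 * needed to bring the host mappings in line with the page tables, and hand
 * them to do_ops().  On failure the current process is killed, since its
 * host mappings can no longer be trusted.
 */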
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}
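
/*
 * Flush a kernel range by walking init_mm's page tables and unmapping,
 * remapping, or reprotecting the host memory directly.  Returns nonzero
 * if any host mapping was changed.
 */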
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = &init_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n", -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}
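
/* Flush a single page by updating its host mapping in place. */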
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_id *mm_id;
	void *flush = NULL;
	int r, w, x, prot, err = 0;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.skas.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);
	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
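
/* Helpers for looking up the page table entries of a process. */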
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
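
/*
 * Apply a batch of queued ops to the host address space of an mm, using
 * the skas map/unmap/protect primitives.
 */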
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i <= last && !ret; i++) {
		op = &ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(&mmu->skas.id, op->u.mmap.addr,
				  op->u.mmap.len, op->u.mmap.prot,
				  op->u.mmap.fd, op->u.mmap.offset, finished,
				  flush);
			break;
		case MUNMAP:
			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
				    op->u.munmap.len, finished, flush);
			break;
		case MPROTECT:
			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, flush);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}
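
/*
 * Fix a userspace range, clamping it below the stub page when the host
 * lacks /proc/mm support so that the stub mappings are never touched.
 */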
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > CONFIG_STUB_START))
		end_addr = CONFIG_STUB_START;

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
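
/* Flush an entire address space, skipping mms that are being torn down. */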
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, end, 0);
}
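
/* Force every VMA of the current process to be remapped on the host. */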
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}