/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
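
/*
 * The add_* helpers below queue host memory operations (mmap, munmap,
 * mprotect) in the ops array instead of issuing a host call per page.
 * Each helper first tries to merge the request into the last queued op;
 * once the array is full, it is flushed through the mode-specific
 * do_ops callback.
 */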
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		/* Extend the previous mmap if this one continues it with
		 * the same protection and a contiguous file offset. */
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	/* The queue is full - flush it before adding the new op. */
	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
					       .u = { .mmap = { .addr = virt,
								.len = len,
								.r = r,
								.w = w,
								.x = x,
								.fd = fd,
								.offset = offset } } });
	return ret;
}
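
/*
 * Queue an munmap of [addr, addr + len), merging it into the previous
 * op when the two ranges are contiguous.
 */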
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
					       .u = { .munmap = { .addr = addr,
								  .len = len } } });
	return ret;
}
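
/*
 * Queue an mprotect; merging additionally requires the protection bits
 * (r, w, x) to match those of the previous op.
 */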
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
					       .u = { .mprotect = { .addr = addr,
								    .len = len,
								    .r = r,
								    .w = w,
								    .x = x } } });
	return ret;
}
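
/* Advance "n" to the next "inc" boundary; inc must be a power of two. */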
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
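
/*
 * Walk the page tables covering [start_addr, end_addr) and queue the
 * host operations needed to bring the host address space back in sync:
 * munmap for ranges whose upper-level entries were cleared, mmap for
 * PTEs pointing at new pages, and mprotect for PTEs whose protection
 * changed.  With "force" set, absent ranges are unmapped even if they
 * are not marked as new.
 */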
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
	int ret = 0;

	for(addr = start_addr; addr < end_addr && !ret;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		/* A page never marked young gets no access; a clean page
		 * stays read-only, so the first write faults and marks it
		 * dirty. */
		if (!pte_young(*npte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*npte)) {
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				ret = add_mmap(addr,
					       pte_val(*npte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       &op_index, last_op, mmu,
					       &flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops,
					      &op_index, last_op, mmu,
					      &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   &op_index, last_op, mmu,
					   &flush, do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret){
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}
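
/*
 * Same walk as fix_range_common(), but over init_mm and applied to the
 * host immediately via os_unmap_memory()/map_memory()/os_protect_memory()
 * rather than queued.  Returns nonzero if any host mapping was changed.
 */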
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n", -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}
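
/* Out-of-line wrappers around the page table lookup macros. */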
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}
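
/* Return the PTE that maps "addr" in "task"'s address space. */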
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}
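
/*
 * The remaining flush entry points dispatch through CHOOSE_MODE to the
 * tt (tracing thread) or skas (separate kernel address space) variant,
 * depending on the mode UML is running in.
 */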
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}