/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
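/*
 * Tear down whatever a pte currently maps.  A present page loses its
 * rmap entry, its page cache reference and its file_rss accounting; a
 * swap pte drops its swap reference; a file pte left behind by an
 * earlier nonlinear remap holds no reference and is simply cleared.
 */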
static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_none(pte))
		return;
	if (pte_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		struct page *page;

		flush_cache_page(vma, addr, pfn);
		pte = ptep_clear_flush(vma, addr, ptep);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, pte, addr);
			return;
		}
		page = pfn_to_page(pfn);
		if (pte_dirty(pte))
			set_page_dirty(page);
		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
	} else {
		/* A swap pte holds a swap reference; a file pte does not. */
		if (!pte_file(pte))
			free_swap_and_cache(pte_to_swp_entry(pte));
		pte_clear(mm, addr, ptep);
	}
}
/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, struct page *page, pgprot_t prot)
{
	struct inode *inode;
	pgoff_t size;
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	/* Walk (and if necessary allocate) all four page table levels. */
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	/*
	 * This page may have been truncated. Tell the
	 * caller about it.
	 */
	err = -EINVAL;
	inode = vma->vm_file->f_mapping->host;
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (!page->mapping || page->index >= size)
		goto err_unlock;
	err = -ENOMEM;
	if (page_mapcount(page) > INT_MAX/2)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	inc_mm_counter(mm, file_rss);
	flush_icache_page(vma, page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	page_add_file_rmap(page);
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);

	err = 0;
err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
EXPORT_SYMBOL(install_page);
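/*
 * install_page() above and install_file_pte() below are the two helpers
 * a vma's ->populate method (typically filemap_populate()) uses to do
 * the actual work: the former maps a pagecache page that is already
 * present, the latter leaves a file pte behind for the fault path to
 * resolve later.
 */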
/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
	int err = -ENOMEM;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;
	pte_t pte_val;

	BUG_ON(vma->vm_flags & VM_RESERVED);

	pgd = pgd_offset(mm, addr);
	spin_lock(&mm->page_table_lock);

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		goto err_unlock;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		goto err_unlock;

	pte = pte_alloc_map(mm, pmd, addr);
	if (!pte)
		goto err_unlock;

	zap_pte(mm, vma, addr, pte);

	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
	pte_val = *pte;
	pte_unmap(pte);
	update_mmu_cache(vma, addr, pte_val);
	spin_unlock(&mm->page_table_lock);
	return 0;

err_unlock:
	spin_unlock(&mm->page_table_lock);
	return err;
}
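/*
 * The pte installed above is a "file pte": not present, with the file
 * offset encoded by pgoff_to_pte().  A later fault decodes it with
 * pte_to_pgoff() and calls ->populate to bring the page back in, which
 * is what keeps nonlinear mappings stable across pageout.
 */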
/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: page of the backing store file to be mapped
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
	unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct address_space *mapping;
	unsigned long end = start + size;
	struct vm_area_struct *vma;
	int err = -EINVAL;
	int has_write_lock = 0;

	if (__prot)
		return err;
	/*
	 * Sanitize the syscall parameters:
	 */
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	/* Does the address range wrap, or is the span zero-sized? */
	if (start + size <= start)
		return err;

	/* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
	if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
		return err;
#endif
	/* We need down_write() to change vma->vm_flags. */
	down_read(&mm->mmap_sem);
 retry:
	vma = find_vma(mm, start);
	/*
	 * Make sure the vma is shared, that it supports prefaulting,
	 * and that the remapped range is valid and fully within
	 * the single existing vma. vm_private_data is used as a
	 * swapout cursor in a VM_NONLINEAR vma (unless VM_RESERVED
	 * or VM_LOCKED, but VM_LOCKED could be revoked later on).
	 */
	if (vma && (vma->vm_flags & VM_SHARED) &&
		(!vma->vm_private_data ||
			(vma->vm_flags & (VM_NONLINEAR|VM_RESERVED))) &&
		vma->vm_ops && vma->vm_ops->populate &&
			end > start && start >= vma->vm_start &&
				end <= vma->vm_end) {
		/* Must set VM_NONLINEAR before any pages are populated. */
		if (pgoff != linear_page_index(vma, start) &&
		    !(vma->vm_flags & VM_NONLINEAR)) {
			if (!has_write_lock) {
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				has_write_lock = 1;
				goto retry;
			}
			mapping = vma->vm_file->f_mapping;
			spin_lock(&mapping->i_mmap_lock);
			flush_dcache_mmap_lock(mapping);
			vma->vm_flags |= VM_NONLINEAR;
			/* Move the vma from the mapping's linear prio tree
			 * to its nonlinear list. */
			vma_prio_tree_remove(vma, &mapping->i_mmap);
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}
		err = vma->vm_ops->populate(vma, start, size,
					    vma->vm_page_prot,
					    pgoff, flags & MAP_NONBLOCK);

		/*
		 * We can't clear VM_NONLINEAR because we'd have to do
		 * it after ->populate completes, and that would prevent
		 * downgrading the lock. (Locks can't be upgraded).
		 */
	}
	if (likely(!has_write_lock))
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);

	return err;
}
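/*
 * Illustrative userspace sketch of the call this implements (a minimal
 * example; `fd' is assumed to be an open descriptor on a shared-mappable
 * file of at least eight pages):
 *
 *	long psize = sysconf(_SC_PAGESIZE);
 *	char *win = mmap(NULL, 4 * psize, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	// Rebind the second window page to file page 7, in place,
 *	// without creating a new vma (prot must be 0 for now):
 *	remap_file_pages(win + psize, psize, 0, 7, 0);
 *
 * Reads through win + psize now return data from file offset 7 * psize,
 * while the other three window pages keep their linear mapping.
 */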