/*
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper ripping pte's out from under us.
 */
static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	int progress = 0;

again:
	pte = pte_offset_map(pmd, addr);
	do {
		unsigned long pfn;
		struct page *page;

		/* Every so often, break out if a reschedule is pending
		 * or the page_table_lock is contended. */
		if (progress >= 64) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(&mm->page_table_lock))
				break;
		}
		progress++;
		if (!pte_present(*pte))
			continue;
		if (!pte_maybe_dirty(*pte))
			continue;
		pfn = pte_pfn(*pte);
		if (unlikely(!pfn_valid(pfn))) {
			print_bad_pte(vma, *pte, addr);
			continue;
		}
		page = pfn_to_page(pfn);

		/* Transfer the dirty bit from the pte (and, on s390,
		 * from the storage key) to the struct page. */
		if (ptep_clear_flush_dirty(vma, addr, pte) ||
		    page_test_and_clear_dirty(page))
			set_page_dirty(page);
		progress += 3;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	cond_resched_lock(&mm->page_table_lock);
	if (addr != end)
		goto again;
}
static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		msync_pte_range(vma, pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		msync_pmd_range(vma, pud, addr, next);
	} while (pud++, addr = next, addr != end);
}
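/*
 * For reference -- a sketch of the generic *_addr_end() helper that
 * drives the walks above (quoted from asm-generic; the pud/pgd levels
 * are analogous). It clamps the sub-range to the span of the current
 * table entry, and the "- 1" comparisons keep end == 0 (top of the
 * address space) from wrapping:
 *
 *	#define pmd_addr_end(addr, end)						\
 *	({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
 *		(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
 *	})
 */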
static void msync_page_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync().
	 * Can't do anything with VM_RESERVED regions either.
	 */
	if (vma->vm_flags & (VM_HUGETLB|VM_RESERVED))
		return;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	spin_lock(&mm->page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		msync_pud_range(vma, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&mm->page_table_lock);
}
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67). Instead, it just
 * marks the relevant pages dirty. The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
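/*
 * A minimal userspace sketch of the MS_ASYNC pattern described above
 * (illustrative only, not part of this file; assumes an open fd and a
 * page-aligned len):
 *
 *	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	map[0] = 1;			-- dirty a page via the mapping
 *	msync(map, len, MS_ASYNC);	-- move pte dirty bits to the pages
 *	fsync(fd);			-- start writeout and wait on it
 */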
static int msync_interval(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end, int flags)
{
	int ret = 0;
	struct file *file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		msync_page_range(vma, addr, end);

		if (flags & MS_SYNC) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file, file->f_dentry, 1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error, error = -EINVAL;

	if (flags & MS_SYNC)
		current->flags |= PF_SYNCWRITE;

	down_read(&current->mm->mmap_sem);
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	current->flags &= ~PF_SYNCWRITE;
	return error;
}
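/*
 * Illustrative userspace consequence of the unmapped-range handling
 * above (a sketch under assumed mappings, not part of this file): if
 * [start,end) spans a hole, the mapped parts are still synced but the
 * call reports the hole:
 *
 *	char *p = mmap(NULL, 3 * pgsz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	munmap(p + pgsz, pgsz);			-- punch a hole in the middle
 *	r = msync(p, 3 * pgsz, MS_SYNC);	-- r == -1, errno == ENOMEM
 */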