/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include "internal.h"
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}
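
/*
 * Caller-side sketch (illustration only, not compiled here) of the
 * -EFAULT plus *pagep contract above: when the atomic kmap copy fails,
 * mcopy_atomic_pte() hands the still-unmapped page back through *pagep
 * so the caller can redo the user copy in a sleepable context.
 * __mcopy_atomic() below implements exactly this; in outline, assuming
 * `page' starts out NULL:
 *
 *	err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
 *			       dst_addr, src_addr, &page);
 *	if (err == -EFAULT) {
 *		up_read(&dst_mm->mmap_sem);
 *		page_kaddr = kmap(page);
 *		err = copy_from_user(page_kaddr,
 *				     (const void __user *) src_addr,
 *				     PAGE_SIZE);
 *		kunmap(page);
 *		// retake mmap_sem, revalidate the vma, then call
 *		// mcopy_atomic_pte() again with *pagep == page
 *	}
 */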
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}
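
/*
 * Why mfill_zeropage_pte() is so much shorter than mcopy_atomic_pte():
 * it installs the shared empty zero page read-only instead of
 * allocating memory, so there is no rmap, lru or memcg work to do.
 * pte_mkspecial() marks the mapping so vm_normal_page() will not try
 * to refcount it; a later write fault takes the normal COW path and
 * only then allocates a real page.
 */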
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily run because the pmd was
	 * missing: *pmd may already be established, and it may even
	 * be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		/*
		 * Make sure the vma is not shared, and that the remaining
		 * dst range is both valid and fully within a single
		 * existing vma.
		 */
		if (dst_vma->vm_flags & VM_SHARED)
			goto out_unlock;
		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * Only allow __mcopy_atomic_hugetlb on userfaultfd registered ranges.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	h = hstate_vma(dst_vma);

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
						idx, dst_addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -EFAULT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.  It is possible that there was a
		 * reservation associated with the page that has been
		 * consumed.  See the routine restore_reserve_on_error
		 * for details.  Unfortunately, we can not call
		 * restore_reserve_on_error now as it would require holding
		 * mmap_sem.  Clear the PagePrivate flag so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.
		 */
		ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */
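
/*
 * The extern declaration above is intentionally never defined: with
 * CONFIG_HUGETLB_PAGE off, is_vm_hugetlb_page() is compile-time false,
 * so the call in __mcopy_atomic() is eliminated as dead code, and any
 * reference that somehow survived would fail at link time rather than
 * at runtime.
 */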
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -EINVAL;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, zeropage);

	/*
	 * Be strict and only allow __mcopy_atomic on userfaultfd
	 * registered ranges to prevent userland errors going
	 * unnoticed. As far as the VM consistency is concerned, it
	 * would be perfectly safe to remove this check, but there's
	 * no useful usage for __mcopy_atomic outside of userfaultfd
	 * registered ranges. This is after all why these are ioctls
	 * belonging to the userfaultfd and not syscalls.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	/*
	 * FIXME: only allow copying on anonymous vmas, tmpfs should
	 * be added.
	 */
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma, or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, &page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
						 dst_addr);

		cond_resched();

		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
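
/*
 * Return convention of __mcopy_atomic(), mirrored by the two wrappers
 * below: like a short read(2), a partially filled range returns the
 * byte count copied so far, and only a completely failed request
 * returns the error, hence the BUG_ON(!copied && !err) above.
 * Userspace observes the short count through the uffdio_copy.copy
 * field and may retry the remainder.
 */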
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true);
}
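
/*
 * Illustrative userspace sketch (assumptions: `uffd' is a userfaultfd
 * already registered over the faulting range, `fault_addr' was taken
 * from the uffd_msg read off the descriptor, and `page_buf' and
 * `page_size' are supplied by the monitor thread).  mcopy_atomic() is
 * reached through the UFFDIO_COPY ioctl and mfill_zeropage() through
 * UFFDIO_ZEROPAGE:
 *
 *	struct uffdio_copy uffdio_copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long) page_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_COPY, &uffdio_copy) == -1)
 *		handle_error();	// uffdio_copy.copy holds the short count
 *
 * The faulting thread is woken once the copy is in place, unless
 * UFFDIO_COPY_MODE_DONTWAKE was set in .mode.
 */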