mm: factor out functionality to finish page faults
author Jan Kara <jack@suse.cz>
Wed, 14 Dec 2016 23:07:21 +0000 (15:07 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:09 +0000 (16:04 -0800)
Introduce finish_fault() as a helper function for finishing page faults.
It is a rather thin wrapper around alloc_set_pte(), but since we will want
to call this from DAX code and from filesystems, it is still useful to
avoid some boilerplate code.
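
As a rough illustration (not part of this patch), a read-fault handler
built on the new helper could look like the sketch below, which mirrors
the do_read_fault() conversion further down; my_fs_read_page() is a
hypothetical helper assumed to return with vmf->page locked and uptodate:

	static int my_fs_fault(struct vm_fault *vmf)
	{
		int ret;

		/* Fill vmf->page; on success the page is locked and uptodate. */
		ret = my_fs_read_page(vmf);
		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
			return ret;

		/*
		 * finish_fault() takes the PTE lock, installs the PTE and
		 * rmap for vmf->page, and drops the lock again.
		 */
		ret |= finish_fault(vmf);
		unlock_page(vmf->page);
		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
			put_page(vmf->page);
		return ret;
	}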

Link: http://lkml.kernel.org/r/1479460644-25076-10-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6e25f4916d6fffcd148562efa4ca9606736d4009..60a230e6ece74a737f0be00382f172b7dd2b4d03 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,6 +620,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 
 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
                struct page *page);
+int finish_fault(struct vm_fault *vmf);
 #endif
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 02504cd4ca0e243cb63aaa99508932e48bd86799..22f7f6e38515e8d84fd038d217da2a11cda8b8bb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3074,6 +3074,38 @@ fault_handled:
        return ret;
 }
 
+
+/**
+ * finish_fault - finish page fault once we have prepared the page to fault
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a page fault once the
+ * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
+ * given page, adds reverse page mapping, handles memcg charges and LRU
+ * addition. The function returns 0 on success, VM_FAULT_ code in case of
+ * error.
+ *
+ * The function expects the page to be locked and on success it consumes a
+ * reference of a page being mapped (for the PTE which maps it).
+ */
+int finish_fault(struct vm_fault *vmf)
+{
+       struct page *page;
+       int ret;
+
+       /* Did we COW the page? */
+       if ((vmf->flags & FAULT_FLAG_WRITE) &&
+           !(vmf->vma->vm_flags & VM_SHARED))
+               page = vmf->cow_page;
+       else
+               page = vmf->page;
+       ret = alloc_set_pte(vmf, vmf->memcg, page);
+       if (vmf->pte)
+               pte_unmap_unlock(vmf->pte, vmf->ptl);
+       return ret;
+}
+
 static unsigned long fault_around_bytes __read_mostly =
        rounddown_pow_of_two(65536);
 
@@ -3213,9 +3245,7 @@ static int do_read_fault(struct vm_fault *vmf)
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                return ret;
 
-       ret |= alloc_set_pte(vmf, NULL, vmf->page);
-       if (vmf->pte)
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
+       ret |= finish_fault(vmf);
        unlock_page(vmf->page);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                put_page(vmf->page);
@@ -3250,9 +3280,7 @@ static int do_cow_fault(struct vm_fault *vmf)
                copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
        __SetPageUptodate(vmf->cow_page);
 
-       ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
-       if (vmf->pte)
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
+       ret |= finish_fault(vmf);
        if (!(ret & VM_FAULT_DAX_LOCKED)) {
                unlock_page(vmf->page);
                put_page(vmf->page);
@@ -3293,9 +3321,7 @@ static int do_shared_fault(struct vm_fault *vmf)
                }
        }
 
-       ret |= alloc_set_pte(vmf, NULL, vmf->page);
-       if (vmf->pte)
-               pte_unmap_unlock(vmf->pte, vmf->ptl);
+       ret |= finish_fault(vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
                                        VM_FAULT_RETRY))) {
                unlock_page(vmf->page);