mm: add vmf_insert_pfn_pmd()
author		Matthew Wilcox <willy@linux.intel.com>
		Tue, 8 Sep 2015 21:58:54 +0000 (14:58 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 8 Sep 2015 22:35:28 +0000 (15:35 -0700)
Similar to vm_insert_pfn(), but for PMDs rather than PTEs.  The 'vmf_'
prefix instead of the 'vm_' prefix is intended to indicate that it returns
a VM_FAULT_ value rather than an errno (which would only have to be
converted into a VM_FAULT_ value anyway).

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
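
As a usage illustration (not part of the commit): a driver implementing the
->pmd_fault handler introduced earlier in this series could call the new
helper directly, since it already returns a VM_FAULT_ code.  This is a
minimal sketch; my_dev_pfn() is a hypothetical stand-in for however the
driver resolves the PMD-sized pfn backing the faulting address.

	static int my_dev_pmd_fault(struct vm_area_struct *vma,
			unsigned long addr, pmd_t *pmd, unsigned int flags)
	{
		/* hypothetical: look up the PMD-aligned pfn for this range */
		unsigned long pfn = my_dev_pfn(vma, addr);

		/* returns a VM_FAULT_ value directly; no errno to convert */
		return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
					  flags & FAULT_FLAG_WRITE);
	}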
include/linux/huge_mm.h
mm/huge_memory.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 70587ea079c3acb39e3e6f075d6b2206039584b1..f9b612fec4dd8242ca20fe2d0e45c8b520ad273f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -33,6 +33,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
+int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
+                       unsigned long pfn, bool write);
 
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c426a89e025c726259604b16c99a74ce080a638d..3ea6f908a5e01376da6be0d691e5d0c356bb8f4a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -869,6 +869,49 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                            flags);
 }
 
+static int insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+               pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       pmd_t entry;
+       spinlock_t *ptl;
+
+       ptl = pmd_lock(mm, pmd);
+       if (pmd_none(*pmd)) {
+               entry = pmd_mkhuge(pfn_pmd(pfn, prot));
+               if (write) {
+                       entry = pmd_mkyoung(pmd_mkdirty(entry));
+                       entry = maybe_pmd_mkwrite(entry, vma);
+               }
+               set_pmd_at(mm, addr, pmd, entry);
+               update_mmu_cache_pmd(vma, addr, pmd);
+       }
+       spin_unlock(ptl);
+       return VM_FAULT_NOPAGE;
+}
+
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+                       pmd_t *pmd, unsigned long pfn, bool write)
+{
+       pgprot_t pgprot = vma->vm_page_prot;
+       /*
+        * If we had pmd_special, we could avoid all these restrictions,
+        * but we need to be consistent with PTEs and architectures that
+        * can't support a 'special' bit.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return VM_FAULT_SIGBUS;
+       if (track_pfn_insert(vma, &pgprot, pfn))
+               return VM_FAULT_SIGBUS;
+       return insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
+}
+
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma)
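
For context, a sketch of an mmap() setup that satisfies the BUG_ON()
preconditions in vmf_insert_pfn_pmd() above: exactly one of VM_PFNMAP or
VM_MIXEDMAP must be set on the VMA, and a VM_PFNMAP mapping must not be a
COW mapping.  my_dev_vm_ops is a hypothetical vm_operations_struct that
would provide the fault handlers.

	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* refuse private writable (COW) mappings of device memory */
		if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
			return -EINVAL;

		vma->vm_flags |= VM_PFNMAP;	/* PFNMAP, not MIXEDMAP */
		vma->vm_ops = &my_dev_vm_ops;	/* hypothetical ops */
		return 0;
	}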