[PATCH] mm: update_hiwaters just in time
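
The title refers to switching the mm high-watermark bookkeeping from eager updates to a single "just in time" sample taken right before rss is lowered; the hunk added at the top of unmap_hugepage_range() below is one such sample point, and the rss counter being adjusted is now the file_rss half of the file_rss/anon_rss split from the same series. A minimal standalone sketch of the idea follows; the field and helper names here are illustrative simplifications, not the kernel's actual mm_struct layout.

/* Track the rss high watermark lazily: sample it only where rss is about
 * to drop, instead of on every increment. Illustrative sketch only. */
#include <stdio.h>

struct mm_counters {
	unsigned long file_rss;		/* file-backed pages (hugetlb counts here) */
	unsigned long anon_rss;		/* anonymous pages */
	unsigned long hiwater_rss;	/* high watermark of file_rss + anon_rss */
};

static unsigned long get_rss(const struct mm_counters *mm)
{
	return mm->file_rss + mm->anon_rss;
}

/* Called just before rss is lowered, as unmap_hugepage_range() now does. */
static void update_hiwater(struct mm_counters *mm)
{
	unsigned long rss = get_rss(mm);

	if (mm->hiwater_rss < rss)
		mm->hiwater_rss = rss;
}

int main(void)
{
	struct mm_counters mm = { 0, 0, 0 };

	mm.file_rss += 512;	/* map one 2MB huge page = 512 small 4K pages */
	update_hiwater(&mm);	/* sample before lowering rss... */
	mm.file_rss -= 512;	/* ...then unmap the huge page */

	printf("rss=%lu hiwater_rss=%lu\n", get_rss(&mm), mm.hiwater_rss);
	return 0;
}
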
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459e827249da8c6226ccf080f15d65a6..ac5f044bf5141d5bd19201b7cedc6610ca6a3876 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -286,7 +286,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
-                       add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+                       add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
@@ -310,6 +310,9 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(start & ~HPAGE_MASK);
        BUG_ON(end & ~HPAGE_MASK);
 
+       /* Update high watermark before we lower rss */
+       update_hiwater_rss(mm);
+
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (! ptep)
@@ -324,7 +327,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
                page = pte_page(pte);
                put_page(page);
-               add_mm_counter(mm, rss,  - (HPAGE_SIZE / PAGE_SIZE));
+               add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
        }
        flush_tlb_range(vma, start, end);
 }
@@ -386,7 +389,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                                goto out;
                        }
                }
-               add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+               add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
                set_huge_pte_at(mm, addr, pte, make_huge_pte(vma, page));
        }
 out:
@@ -394,6 +397,28 @@ out:
        return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                       unsigned long address, int write_access)
+{
+       int ret = VM_FAULT_SIGBUS;
+       pte_t *pte;
+
+       spin_lock(&mm->page_table_lock);
+       pte = huge_pte_offset(mm, address);
+       if (pte && !pte_none(*pte))
+               ret = VM_FAULT_MINOR;
+       spin_unlock(&mm->page_table_lock);
+       return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
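
As a side note on the new hugetlb_fault() return values: VM_FAULT_MINOR is counted as an ordinary minor fault by the caller, while VM_FAULT_SIGBUS leads the arch fault handler to deliver SIGBUS. The fragment below is an illustrative, from-memory sketch of how arch code of this era classified these codes; it is not part of this patch, and the helper name and SIGBUS branch are simplified stand-ins.

/* Sketch only: classify a VM_FAULT_* result the way an arch page-fault
 * handler of this era would; not taken from this diff. */
static void account_fault(struct task_struct *tsk, int fault)
{
	switch (fault) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;		/* e.g. the stale ia64 TLB entry case */
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;		/* page had to be brought in from disk */
		break;
	case VM_FAULT_SIGBUS:
		force_sig(SIGBUS, tsk);	/* e.g. hugetlb page truncated since mmap */
		break;
	default:
		break;			/* VM_FAULT_OOM: left to the OOM path */
	}
}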