diff --git a/mm/ksm.c b/mm/ksm.c
index 10618a36abb0b19e70cdeaf78fcff867092c0a15..659e2b5119c043671cf7a0e19afcf1c68a78a3c0 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -467,20 +467,6 @@ static void break_cow(struct rmap_item *rmap_item)
        mmput(mm);
 }
 
-static struct page *page_trans_compound_anon(struct page *page)
-{
-       if (PageTransCompound(page)) {
-               struct page *head = compound_head(page);
-               /*
-                * head may actually be splitted and freed from under
-                * us but it's ok here.
-                */
-               if (PageAnon(head))
-                       return head;
-       }
-       return NULL;
-}
-
 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 {
        struct mm_struct *mm = rmap_item->mm;
@@ -505,7 +491,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                goto out;
-       if (PageAnon(page) || page_trans_compound_anon(page)) {
+       if (PageAnon(page)) {
                flush_anon_page(vma, page, addr);
                flush_dcache_page(page);
        } else {
@@ -993,13 +979,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        }
 
        get_page(kpage);
-       page_add_anon_rmap(kpage, vma, addr);
+       page_add_anon_rmap(kpage, vma, addr, false);
 
        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 
-       page_remove_rmap(page);
+       page_remove_rmap(page, false);
        if (!page_mapped(page))
                try_to_free_swap(page);
        put_page(page);
@@ -1012,33 +998,6 @@ out:
        return err;
 }
 
-static int page_trans_compound_anon_split(struct page *page)
-{
-       int ret = 0;
-       struct page *transhuge_head = page_trans_compound_anon(page);
-       if (transhuge_head) {
-               /* Get the reference on the head to split it. */
-               if (get_page_unless_zero(transhuge_head)) {
-                       /*
-                        * Recheck we got the reference while the head
-                        * was still anonymous.
-                        */
-                       if (PageAnon(transhuge_head))
-                               ret = split_huge_page(transhuge_head);
-                       else
-                               /*
-                                * Retry later if split_huge_page run
-                                * from under us.
-                                */
-                               ret = 1;
-                       put_page(transhuge_head);
-               } else
-                       /* Retry later if split_huge_page run from under us. */
-                       ret = 1;
-       }
-       return ret;
-}
-
 /*
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
@@ -1057,9 +1016,6 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
        if (page == kpage)                      /* ksm page forked */
                return 0;
 
-       if (PageTransCompound(page) && page_trans_compound_anon_split(page))
-               goto out;
-       BUG_ON(PageTransCompound(page));
        if (!PageAnon(page))
                goto out;
 
@@ -1072,6 +1028,13 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
         */
        if (!trylock_page(page))
                goto out;
+
+       if (PageTransCompound(page)) {
+               err = split_huge_page(page);
+               if (err)
+                       goto out_unlock;
+       }
+
        /*
         * If this anonymous page is mapped only here, its pte may need
         * to be write-protected.  If it's mapped elsewhere, all of its
@@ -1087,6 +1050,18 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                         */
                        set_page_stable_node(page, NULL);
                        mark_page_accessed(page);
+                       /*
+                        * A stable page may be shared by several
+                        * processes.  After CoW or zapping has happened
+                        * in every process but the last, that last
+                        * process is left as the page's sole owner, and
+                        * its page table entry may carry no dirty bit.
+                        * In that case MADV_FREE could wrongly discard
+                        * the page.  To prevent that, mark the stable
+                        * page dirty.
+                        */
+                       if (!PageDirty(page))
+                               SetPageDirty(page);
                        err = 0;
                } else if (pages_identical(page, kpage))
                        err = replace_page(vma, page, kpage, orig_pte);
@@ -1102,6 +1077,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
                }
        }
 
+out_unlock:
        unlock_page(page);
 out:
        return err;
@@ -1409,7 +1385,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
                cond_resched();
                tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                tree_page = get_mergeable_page(tree_rmap_item);
-               if (IS_ERR_OR_NULL(tree_page))
+               if (!tree_page)
                        return NULL;
 
                /*
@@ -1689,8 +1665,7 @@ next_mm:
                                cond_resched();
                                continue;
                        }
-                       if (PageAnon(*page) ||
-                           page_trans_compound_anon(*page)) {
+                       if (PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
                                flush_dcache_page(*page);
                                rmap_item = get_next_rmap_item(slot,
@@ -1953,7 +1928,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
-               __set_page_locked(new_page);
+               __SetPageLocked(new_page);
        }
 
        return new_page;