git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
mm: reduce rmap overhead for ex-KSM page copies created on swap faults
author Johannes Weiner <hannes@cmpxchg.org>
Wed, 20 Feb 2013 02:14:02 +0000 (13:14 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Wed, 20 Feb 2013 05:52:21 +0000 (16:52 +1100)
When ex-KSM pages are faulted from swap cache, the fault handler is not
capable of re-establishing anon_vma-spanning KSM pages.  In this case, a
copy of the page is created instead, just like during a COW break.

These freshly made copies are known to be exclusive to the faulting VMA
and there is no reason to go look for this page in parent and sibling
processes during rmap operations.

Use page_add_new_anon_rmap() for these copies.  This also puts them on the
proper LRU lists and marks them SwapBacked, so we can get rid of doing
this ad-hoc in the KSM copy code.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Simon Jeons <simon.jeons@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Satoru Moriya <satoru.moriya@hds.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/ksm.c
mm/memory.c

index 51573858938d1435a5b74c2c36dbd833b5c27837..e1f1f278075fc63fdde1b808c118c6dee2e88a64 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1590,13 +1590,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
-               SetPageSwapBacked(new_page);
                __set_page_locked(new_page);
-
-               if (!mlocked_vma_newpage(vma, new_page))
-                       lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
-               else
-                       add_page_to_unevictable_list(new_page);
        }
 
        return new_page;
index bc8bec762db7d8c42bed6167a289d5dc0e7842fb..569558810b90d572da037cd40c7694ddaab6ce28 100644 (file)
@@ -3044,7 +3044,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
-       do_page_add_anon_rmap(page, vma, address, exclusive);
+       if (swapcache) /* ksm created a completely new copy */
+               page_add_new_anon_rmap(page, vma, address);
+       else
+               do_page_add_anon_rmap(page, vma, address, exclusive);
        /* It's better to call commit-charge after rmap is established */
        mem_cgroup_commit_charge_swapin(page, ptr);