mm: introduce compaction and migration for ballooned pages
author	Rafael Aquini <aquini@redhat.com>
	Fri, 9 Nov 2012 03:04:31 +0000 (14:04 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Mon, 12 Nov 2012 04:17:11 +0000 (15:11 +1100)
Memory fragmentation introduced by ballooning might significantly
reduce the number of 2MB contiguous memory blocks that can be used
within a guest, imposing performance penalties associated with the
reduced number of transparent huge pages that could be used by the
guest workload.

This patch introduces the helper functions, as well as the changes
needed to teach the compaction and migration code how to cope with
pages that belong to a guest memory balloon, making those pages
movable by memory compaction.
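
For reference, the companion patches in this series give balloon drivers
a small API in <linux/balloon_compaction.h> for keeping their pages on a
compaction-aware list. The sketch below assumes that API: it shows how a
driver's inflate path might hand a page to that list so the compaction
changes in this patch can later find it. balloon_page_enqueue() and the
balloon_dev_info bookkeeping are not part of this diff and appear here
only as an illustration:

#include <linux/balloon_compaction.h>

/*
 * Sketch only: inflate one page into the balloon so that compaction
 * can later spot it via balloon_page_movable(). balloon_page_enqueue()
 * is assumed from the companion patches, not from the diff below.
 */
static int balloon_inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_enqueue(b_dev_info);

	if (!page)
		return -ENOMEM;

	/* ... notify the hypervisor about page_to_pfn(page) ... */
	return 0;
}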

Signed-off-by: Rafael Aquini <aquini@redhat.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c
mm/migrate.c

diff --git a/mm/compaction.c b/mm/compaction.c
index 9eef55838fca5ff99580982e85ea0c0fa6dea839..76abd84316246decdad54f19b9d4b36a800e552a 100644
@@ -14,6 +14,7 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
+#include <linux/balloon_compaction.h>
 #include "internal.h"
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -565,9 +566,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        goto next_pageblock;
                }
 
-               /* Check may be lockless but that's ok as we recheck later */
-               if (!PageLRU(page))
+               /*
+                * Check may be lockless but that's ok as we recheck later.
+                * It's possible to migrate LRU pages and balloon pages;
+                * skip any other type of page.
+                */
+               if (!PageLRU(page)) {
+                       if (unlikely(balloon_page_movable(page))) {
+                               if (locked && balloon_page_isolate(page)) {
+                                       /* Successfully isolated */
+                                       cc->finished_update_migrate = true;
+                                       list_add(&page->lru, migratelist);
+                                       cc->nr_migratepages++;
+                                       nr_isolated++;
+                                       goto check_compact_cluster;
+                               }
+                       }
                        continue;
+               }
 
                /*
                 * PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +637,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                cc->nr_migratepages++;
                nr_isolated++;
 
+check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
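
The isolation path above is deliberately two-phase: balloon_page_movable()
is only a lockless hint taken during the pfn scan, while
balloon_page_isolate() revalidates the page and detaches it from the
balloon's list only when the scanner already holds the lru_lock
("locked"); successfully isolated balloon pages then share the
COMPACT_CLUSTER_MAX accounting at the new check_compact_cluster label.
A condensed sketch of that control flow, with the bookkeeping dropped
(this is a paraphrase, not the patch text):

if (!PageLRU(page)) {
	if (balloon_page_movable(page) &&	/* lockless hint      */
	    locked &&				/* lru_lock is held   */
	    balloon_page_isolate(page)) {	/* recheck and detach */
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		/* fall through to the COMPACT_CLUSTER_MAX check */
	} else {
		continue;	/* skip any other type of non-LRU page */
	}
}
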
diff --git a/mm/migrate.c b/mm/migrate.c
index 2424efba605d46cf3146b52cf0326eeb05501e31..0410e80c9ac36f5670bd0cb327be801a2f7d9e51 100644
@@ -35,6 +35,7 @@
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
+#include <linux/balloon_compaction.h>
 
 #include <asm/tlbflush.h>
 
@@ -79,7 +80,10 @@ void putback_lru_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               putback_lru_page(page);
+               if (unlikely(balloon_page_movable(page)))
+                       balloon_page_putback(page);
+               else
+                       putback_lru_page(page);
        }
 }
 
@@ -778,6 +782,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                }
        }
 
+       if (unlikely(balloon_page_movable(page))) {
+               /*
+                * A ballooned page does not need any special attention from
+                * physical to virtual reverse mapping procedures.
+                * Skip any attempt to unmap PTEs or to remap swap cache,
+                * in order to avoid burning cycles at rmap level, and perform
+                * the page migration right away (protected by page lock).
+                */
+               rc = balloon_page_migrate(newpage, page, mode);
+               goto uncharge;
+       }
+
        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -814,7 +830,9 @@ skip_unmap:
                put_anon_vma(anon_vma);
 
 uncharge:
-       mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
+       mem_cgroup_end_migration(mem, page, newpage,
+                                (rc == MIGRATEPAGE_SUCCESS ||
+                                 rc == MIGRATEPAGE_BALLOON_SUCCESS));
 unlock:
        unlock_page(page);
 out:
@@ -846,6 +864,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        goto out;
 
        rc = __unmap_and_move(page, newpage, force, offlining, mode);
+
+       if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+               /*
+                * A ballooned page has been migrated already.
+                * Now it is time to remove the old page from the isolated
+                * pageset list, hand it back to Buddy, wrap up counters
+                * and return.
+                */
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                   page_is_file_cache(page));
+               put_page(page);
+               __free_page(page);
+               return 0;
+       }
 out:
        if (rc != -EAGAIN) {
                /*