mm: introduce compaction and migration for ballooned pages
author     Rafael Aquini <aquini@redhat.com>
           Wed, 12 Dec 2012 00:02:42 +0000 (16:02 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 12 Dec 2012 01:22:27 +0000 (17:22 -0800)
Memory fragmentation introduced by ballooning might significantly reduce the
number of 2MB contiguous memory blocks that can be used within a guest,
imposing a performance penalty through the reduced number of transparent huge
pages available to the guest workload.

This patch introduces the helper functions as well as the necessary changes
to teach the compaction and migration code how to cope with pages which are
part of a guest memory balloon, in order to make them movable by memory
compaction procedures (the helper interface is sketched below, after the
sign-off block).

Signed-off-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
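
The hunks below rely on a small helper interface for balloon pages. The
helpers themselves are declared in include/linux/balloon_compaction.h by this
patch series and are not part of this excerpt; the prototypes below are only
a sketch of that interface as it is used here, with comments describing the
expected semantics rather than quoting the upstream header.

/*
 * Balloon-page helper interface used by the diff below -- an illustrative
 * sketch only; the real declarations live in include/linux/balloon_compaction.h,
 * which is added elsewhere in this patch series.
 */
#include <linux/mm.h>
#include <linux/migrate.h>

/* Does @page belong to a balloon device that opted into compaction? */
bool balloon_page_movable(struct page *page);

/* Take @page off the balloon's page list and pin it so it can be migrated. */
bool balloon_page_isolate(struct page *page);

/* Undo isolation: return a not-migrated @page to the balloon's page list. */
void balloon_page_putback(struct page *page);

/*
 * Let the balloon driver move @page's contents into @newpage; returns
 * MIGRATEPAGE_BALLOON_SUCCESS when the balloon has taken over the new page.
 */
int balloon_page_migrate(struct page *newpage, struct page *page,
			 enum migrate_mode mode);

/* Release an already-migrated old @page back to the buddy allocator. */
void balloon_page_free(struct page *page);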
mm/compaction.c
mm/migrate.c

diff --git a/mm/compaction.c b/mm/compaction.c
index 694eaabaaebdc0827c93d81c97f200fc534a115e..470474c03b6123aa57876b1aac7006964aca9028 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -14,6 +14,7 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
+#include <linux/balloon_compaction.h>
 #include "internal.h"
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -565,9 +566,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        goto next_pageblock;
                }
 
-               /* Check may be lockless but that's ok as we recheck later */
-               if (!PageLRU(page))
+               /*
+                * Check may be lockless but that's ok as we recheck later.
+                * It's possible to migrate LRU pages and balloon pages;
+                * skip any other type of page.
+                */
+               if (!PageLRU(page)) {
+                       if (unlikely(balloon_page_movable(page))) {
+                               if (locked && balloon_page_isolate(page)) {
+                                       /* Successfully isolated */
+                                       cc->finished_update_migrate = true;
+                                       list_add(&page->lru, migratelist);
+                                       cc->nr_migratepages++;
+                                       nr_isolated++;
+                                       goto check_compact_cluster;
+                               }
+                       }
                        continue;
+               }
 
                /*
                 * PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +637,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                cc->nr_migratepages++;
                nr_isolated++;
 
+check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
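
The mm/migrate.c hunks that follow test a new migration return code,
MIGRATEPAGE_BALLOON_SUCCESS, which this patch series adds to
include/linux/migrate.h next to MIGRATEPAGE_SUCCESS; the header change is not
shown in this excerpt. A sketch of the distinction, with the exact values and
comments assumed rather than quoted:

/* include/linux/migrate.h -- sketch only; exact values assumed, not quoted */
#define MIGRATEPAGE_SUCCESS		0	/* page migrated and released normally */
#define MIGRATEPAGE_BALLOON_SUCCESS	1	/* balloon page migrated: the caller must
						 * still drop the NR_ISOLATED accounting
						 * and free the old page itself (see
						 * unmap_and_move() below) */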
diff --git a/mm/migrate.c b/mm/migrate.c
index 33f5f82a6006081dc0c77bd80d4d63cbb98e2071..427343c0c29620bc2d0c34499a799eea645263f0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,6 +35,7 @@
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
+#include <linux/balloon_compaction.h>
 
 #include <asm/tlbflush.h>
 
@@ -79,7 +80,10 @@ void putback_lru_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               putback_lru_page(page);
+               if (unlikely(balloon_page_movable(page)))
+                       balloon_page_putback(page);
+               else
+                       putback_lru_page(page);
        }
 }
 
@@ -768,6 +772,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                }
        }
 
+       if (unlikely(balloon_page_movable(page))) {
+               /*
+                * A ballooned page does not need any special attention from
+                * physical to virtual reverse mapping procedures.
+                * Skip any attempt to unmap PTEs or to remap swap cache,
+                * in order to avoid burning cycles at rmap level, and perform
+                * the page migration right away (protected by page lock).
+                */
+               rc = balloon_page_migrate(newpage, page, mode);
+               goto uncharge;
+       }
+
        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -804,7 +820,9 @@ skip_unmap:
                put_anon_vma(anon_vma);
 
 uncharge:
-       mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
+       mem_cgroup_end_migration(mem, page, newpage,
+                                (rc == MIGRATEPAGE_SUCCESS ||
+                                 rc == MIGRATEPAGE_BALLOON_SUCCESS));
 unlock:
        unlock_page(page);
 out:
@@ -836,6 +854,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        goto out;
 
        rc = __unmap_and_move(page, newpage, force, offlining, mode);
+
+       if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+               /*
+                * A ballooned page has been migrated already.
+                * Now it's time to wrap up the counters,
+                * hand the page back to Buddy and return.
+                */
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                   page_is_file_cache(page));
+               balloon_page_free(page);
+               return MIGRATEPAGE_SUCCESS;
+       }
 out:
        if (rc != -EAGAIN) {
                /*
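
For reference, unmap_and_move() above relies on balloon_page_free() to hand
the old page back to Buddy once a balloon page has been migrated. A possible
body for that helper, written as an assumption-level sketch (the real static
inline lives in include/linux/balloon_compaction.h, not in this excerpt):

#include <linux/list.h>
#include <linux/mm.h>

static inline void balloon_page_free(struct page *page)
{
	/* the page was already isolated, so it only sits on the local list */
	list_del(&page->lru);
	/* return it to the buddy allocator */
	__free_page(page);
}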