mm: numa: take THP into account when migrating pages for NUMA balancing
author    Mel Gorman <mgorman@suse.de>
          Wed, 20 Feb 2013 02:14:37 +0000 (13:14 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Wed, 20 Feb 2013 05:52:44 +0000 (16:52 +1100)
Wanpeng Li pointed out that numamigrate_isolate_page() assumes that only
one base page is being migrated when in fact it can also be checking THP.
The consequence is that a migration will be attempted when a target node
is nearly full and will fail later.  It's unlikely to be user-visible but
it should be fixed.  While we are at it, migrate_balanced_pgdat() should
treat nr_migrate_pages as an unsigned long as it is compared against
watermark values.
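
The fix counts base pages from the compound order: a compound page of
order N spans 1UL << N base pages, so that is the value the watermark
check in migrate_balanced_pgdat() should see, rather than a hard-coded 1.
Below is a minimal userspace sketch of that arithmetic only, not kernel
code; the order 9 and 4 KiB base page size are assumptions matching a
typical x86-64 THP and are not taken from the patch itself.

#include <stdio.h>

int main(void)
{
	unsigned int order = 9;			/* assumed THP order on x86-64 */
	unsigned long base_page_size = 4096;	/* assumed 4 KiB base pages */
	unsigned long nr_pages = 1UL << order;	/* 512 base pages for a 2 MiB THP */

	printf("order %u -> %lu base pages (%lu KiB)\n",
	       order, nr_pages, nr_pages * base_page_size / 1024);

	/* A base page has order 0, so the same expression yields 1. */
	printf("order 0 -> %lu base page\n", 1UL << 0);
	return 0;
}

Because the same expression evaluates to 1 for a base page, the single
call site in numamigrate_isolate_page() handles both cases without a
branch.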

Signed-off-by: Mel Gorman <mgorman@suse.de>
Suggested-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Simon Jeons <simon.jeons@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/migrate.c

index 2fd8b4af47440a39a31d48a1096e24b571028455..77f4e70df24d0e9eb3c07a67724630e58eaeac9b 100644
@@ -1461,7 +1461,7 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
  * pages. Currently it only checks the watermarks which crude
  */
 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
-                                  int nr_migrate_pages)
+                                  unsigned long nr_migrate_pages)
 {
        int z;
        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
@@ -1559,8 +1559,10 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int ret = 0;
 
+       VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+
        /* Avoid migrating to a node that is nearly full */
-       if (migrate_balanced_pgdat(pgdat, 1)) {
+       if (migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) {
                int page_lru;
 
                if (isolate_lru_page(page)) {