git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Revert "mm: have order > 0 compaction start off where it left"
author Mel Gorman <mgorman@suse.de>
Fri, 28 Sep 2012 00:19:47 +0000 (10:19 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Fri, 5 Oct 2012 04:01:06 +0000 (14:01 +1000)
This reverts commit 7db8889a ("mm: have order > 0 compaction start off
where it left") and commit de74f1cc ("mm: have order > 0 compaction start
near a pageblock with free pages").  These patches were a good idea and
tests confirmed that they massively reduced the amount of scanning but the
implementation is complex and tricky to understand.  A later patch will
cache what pageblocks should be skipped and reimplement the concept of
compact_cached_free_pfn on top for both migration and free scanners.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Richard Davies <richard@arachsys.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmzone.h
mm/compaction.c
mm/internal.h
mm/page_alloc.c

index 9705f2d9dff096a2db4a74c4271830fc94ad39aa..4fcf517306e11840e8f9cca1b94dc82457ff074b 100644 (file)
@@ -358,10 +358,6 @@ struct zone {
         */
        spinlock_t              lock;
        int                     all_unreclaimable; /* All pages pinned */
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-       /* pfn where the last incremental compaction isolated free pages */
-       unsigned long           compact_cached_free_pfn;
-#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
        /* see spanned/present_pages for more description */
        seqlock_t               span_seqlock;
index 2e68837bea60e2b4ec69f5d45b4a00225e095296..6b3cafee9f56a341482382aa4d58f3654260c293 100644 (file)
@@ -536,20 +536,6 @@ next_pageblock:
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-/*
- * Returns the start pfn of the last page block in a zone.  This is the starting
- * point for full compaction of a zone.  Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-       unsigned long free_pfn;
-       free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-       free_pfn &= ~(pageblock_nr_pages-1);
-       return free_pfn;
-}
-
 /*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
@@ -618,19 +604,8 @@ static void isolate_freepages(struct zone *zone,
                 * looking for free pages, the search will restart here as
                 * page migration may have returned some pages to the allocator
                 */
-               if (isolated) {
+               if (isolated)
                        high_pfn = max(high_pfn, pfn);
-
-                       /*
-                        * If the free scanner has wrapped, update
-                        * compact_cached_free_pfn to point to the highest
-                        * pageblock with free pages. This reduces excessive
-                        * scanning of full pageblocks near the end of the
-                        * zone
-                        */
-                       if (cc->order > 0 && cc->wrapped)
-                               zone->compact_cached_free_pfn = high_pfn;
-               }
        }
 
        /* split_free_page does not map the pages */
@@ -638,11 +613,6 @@ static void isolate_freepages(struct zone *zone,
 
        cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
-
-       /* If compact_cached_free_pfn is reset then set it now */
-       if (cc->order > 0 && !cc->wrapped &&
-                       zone->compact_cached_free_pfn == start_free_pfn(zone))
-               zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -737,26 +707,8 @@ static int compact_finished(struct zone *zone,
        if (fatal_signal_pending(current))
                return COMPACT_PARTIAL;
 
-       /*
-        * A full (order == -1) compaction run starts at the beginning and
-        * end of a zone; it completes when the migrate and free scanner meet.
-        * A partial (order > 0) compaction can start with the free scanner
-        * at a random point in the zone, and may have to restart.
-        */
-       if (cc->free_pfn <= cc->migrate_pfn) {
-               if (cc->order > 0 && !cc->wrapped) {
-                       /* We started partway through; restart at the end. */
-                       unsigned long free_pfn = start_free_pfn(zone);
-                       zone->compact_cached_free_pfn = free_pfn;
-                       cc->free_pfn = free_pfn;
-                       cc->wrapped = 1;
-                       return COMPACT_CONTINUE;
-               }
-               return COMPACT_COMPLETE;
-       }
-
-       /* We wrapped around and ended up where we started. */
-       if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
+       /* Compaction run completes if the migrate and free scanner meet */
+       if (cc->free_pfn <= cc->migrate_pfn)
                return COMPACT_COMPLETE;
 
        /*
@@ -862,15 +814,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
        /* Setup to move all movable pages to the end of the zone */
        cc->migrate_pfn = zone->zone_start_pfn;
-
-       if (cc->order > 0) {
-               /* Incremental compaction. Start where the last one stopped. */
-               cc->free_pfn = zone->compact_cached_free_pfn;
-               cc->start_free_pfn = cc->free_pfn;
-       } else {
-               /* Order == -1 starts at the end of the zone. */
-               cc->free_pfn = start_free_pfn(zone);
-       }
+       cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
+       cc->free_pfn &= ~(pageblock_nr_pages-1);
 
        migrate_prep_local();
 
index 67d48651928f653267e63c9470ffcf471f3de7f0..f4674fc397cfeafc117abddb0d42ffab3b2d3a8e 100644 (file)
@@ -119,14 +119,8 @@ struct compact_control {
        unsigned long nr_freepages;     /* Number of isolated free pages */
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
-       unsigned long start_free_pfn;   /* where we started the search */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
        bool sync;                      /* Synchronous migration */
-       bool wrapped;                   /* Order > 0 compactions are
-                                          incremental, once free_pfn
-                                          and migrate_pfn meet, we restart
-                                          from the top of the zone;
-                                          remember we wrapped around. */
 
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
index c7920cb5d26bd9495a9d446f632c57385209ee1d..ec3a55f34f62eb516718513cac64ca1e4dbdbcbf 100644 (file)
@@ -4489,11 +4489,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
                zone->spanned_pages = size;
                zone->present_pages = realsize;
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-               zone->compact_cached_free_pfn = zone->zone_start_pfn +
-                                               zone->spanned_pages;
-               zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
-#endif
 #ifdef CONFIG_NUMA
                zone->node = nid;
                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)