Merge branch 'for-3.20/bdi' of git://git.kernel.dk/linux-block

diff --git a/mm/compaction.c b/mm/compaction.c
index 66f7c365e888936a1c0ebae846c20d927d8a0d2d..b68736c8a1ce0d5c74bd8ebfa78b502c5be37cf6 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -41,6 +41,8 @@ static const char *const compaction_status_string[] = {
        "continue",
        "partial",
        "complete",
+       "no_suitable_page",
+       "not_suitable_zone",
 };
 #endif
 
@@ -122,6 +124,77 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+       zone->compact_considered = 0;
+       zone->compact_defer_shift++;
+
+       if (order < zone->compact_order_failed)
+               zone->compact_order_failed = order;
+
+       if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+               zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+       trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+       unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+       if (order < zone->compact_order_failed)
+               return false;
+
+       /* Avoid possible overflow */
+       if (++zone->compact_considered > defer_limit)
+               zone->compact_considered = defer_limit;
+
+       if (zone->compact_considered >= defer_limit)
+               return false;
+
+       trace_mm_compaction_deferred(zone, order);
+
+       return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+               bool alloc_success)
+{
+       if (alloc_success) {
+               zone->compact_considered = 0;
+               zone->compact_defer_shift = 0;
+       }
+       if (order >= zone->compact_order_failed)
+               zone->compact_order_failed = order + 1;
+
+       trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+       if (order < zone->compact_order_failed)
+               return false;
+
+       return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+               zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
                                        struct page *page)
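
The deferral logic added above can be exercised in isolation. Below is a minimal userspace sketch, not part of the patch: zone_stub, the *_stub helpers and the printf loop are illustrative stand-ins that mirror the counter handling of defer_compaction()/compaction_deferred(), showing how each failure widens the skip window until it saturates at 1 << COMPACT_MAX_DEFER_SHIFT.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

/* Reduced stand-in for struct zone: only the two defer counters. */
struct zone_stub {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
};

/* Mirrors defer_compaction(): reset the counter, widen the window. */
static void defer_compaction_stub(struct zone_stub *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;
	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirrors compaction_deferred(): true means "skip compaction this time". */
static bool compaction_deferred_stub(struct zone_stub *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

int main(void)
{
	struct zone_stub zone = { 0, 0 };
	int fail;

	for (fail = 1; fail <= 8; fail++) {
		int skipped = 0;

		defer_compaction_stub(&zone);	/* another failed compaction */
		while (compaction_deferred_stub(&zone))
			skipped++;
		printf("after failure %d: %d request(s) skipped\n", fail, skipped);
	}
	return 0;
}
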
@@ -430,11 +503,12 @@ isolate_fail:
 
        }
 
+       trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
+                                       nr_scanned, total_isolated);
+
        /* Record how far we have got within the block */
        *start_pfn = blockpfn;
 
-       trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
-
        /*
         * If strict isolation is requested by CMA then check that all the
         * pages requested were isolated. If there were any failures, 0 is
@@ -590,6 +664,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
        unsigned long flags = 0;
        bool locked = false;
        struct page *page = NULL, *valid_page = NULL;
+       unsigned long start_pfn = low_pfn;
 
        /*
         * Ensure that there are not too many pages isolated from the LRU
@@ -750,7 +825,8 @@ isolate_success:
        if (low_pfn == end_pfn)
                update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
-       trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+       trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
+                                               nr_scanned, nr_isolated);
 
        count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
        if (nr_isolated)
@@ -1046,7 +1122,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
        return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
 }
 
-static int compact_finished(struct zone *zone, struct compact_control *cc,
+static int __compact_finished(struct zone *zone, struct compact_control *cc,
                            const int migratetype)
 {
        unsigned int order;
@@ -1101,7 +1177,20 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
                        return COMPACT_PARTIAL;
        }
 
-       return COMPACT_CONTINUE;
+       return COMPACT_NO_SUITABLE_PAGE;
+}
+
+static int compact_finished(struct zone *zone, struct compact_control *cc,
+                           const int migratetype)
+{
+       int ret;
+
+       ret = __compact_finished(zone, cc, migratetype);
+       trace_mm_compaction_finished(zone, cc->order, ret);
+       if (ret == COMPACT_NO_SUITABLE_PAGE)
+               ret = COMPACT_CONTINUE;
+
+       return ret;
 }
 
 /*
@@ -1111,7 +1200,7 @@ static int compact_finished(struct zone *zone, struct compact_control *cc,
  *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
  *   COMPACT_CONTINUE - If compaction should run now
  */
-unsigned long compaction_suitable(struct zone *zone, int order,
+static unsigned long __compaction_suitable(struct zone *zone, int order,
                                        int alloc_flags, int classzone_idx)
 {
        int fragindex;
@@ -1155,11 +1244,24 @@ unsigned long compaction_suitable(struct zone *zone, int order,
         */
        fragindex = fragmentation_index(zone, order);
        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
-               return COMPACT_SKIPPED;
+               return COMPACT_NOT_SUITABLE_ZONE;
 
        return COMPACT_CONTINUE;
 }
 
+unsigned long compaction_suitable(struct zone *zone, int order,
+                                       int alloc_flags, int classzone_idx)
+{
+       unsigned long ret;
+
+       ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
+       trace_mm_compaction_suitable(zone, order, ret);
+       if (ret == COMPACT_NOT_SUITABLE_ZONE)
+               ret = COMPACT_SKIPPED;
+
+       return ret;
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
        int ret;
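
Both compact_finished() and compaction_suitable() above follow the same split: a __-prefixed worker returns a detailed status (COMPACT_NO_SUITABLE_PAGE, COMPACT_NOT_SUITABLE_ZONE) that exists only so the tracepoint can record why compaction stopped, while the thin public wrapper folds it back to the status callers already handle. A minimal sketch of that pattern, with stand-in names rather than the kernel definitions:

/* Stand-in statuses, not the kernel's COMPACT_* definitions. */
enum result { RES_CONTINUE, RES_NO_SUITABLE_PAGE };

/* Worker: reports the detailed reason, used only for tracing. */
static enum result __finished(void)
{
	return RES_NO_SUITABLE_PAGE;
}

/* Stand-in for the trace_mm_compaction_finished() tracepoint. */
static void trace_result(enum result res)
{
	(void)res;
}

/* Public wrapper: trace the detailed status, then hide it from callers. */
static enum result finished(void)
{
	enum result res = __finished();

	trace_result(res);
	if (res == RES_NO_SUITABLE_PAGE)
		res = RES_CONTINUE;
	return res;
}
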
@@ -1373,6 +1475,8 @@ unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
        if (!order || !may_enter_fs || !may_perform_io)
                return COMPACT_SKIPPED;
 
+       trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
+
        /* Compact each zone in the list */
        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {