mm: migration: allow migration to operate asynchronously and avoid synchronous compac...
diff --git a/mm/compaction.c b/mm/compaction.c
index 8fe917ec7c11be1b8ebd18ef4f2374d73677d8e7..47fca1069343c82f448b8e56db7237d8843eb8eb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -33,6 +33,7 @@ struct compact_control {
        unsigned long nr_migratepages;  /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */
        unsigned long migrate_pfn;      /* isolate_migratepages search base */
+       bool sync;                      /* Synchronous migration */
 
        /* Account for isolated anon and file pages */
        unsigned long nr_anon;
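The new sync field records whether this compaction pass is allowed to block during migration. It is consumed outside this file: in the companion mm/migrate.c change (not shown on this page), an asynchronous pass presumably backs off from any step that would sleep. A minimal sketch of that pattern, assuming the pre-3.x unmap_and_move() locking sequence:

	/* Sketch only -- the mm/migrate.c hunk is not part of this page.
	 * An async caller (sync == false) skips the page rather than
	 * sleeping on its lock; a sync caller may block. */
	if (!trylock_page(page)) {
		if (!force || !sync)
			goto out;	/* asynchronous: do not block */
		lock_page(page);	/* synchronous: may sleep */
	}
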
@@ -455,7 +456,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                nr_migrate = cc->nr_migratepages;
                migrate_pages(&cc->migratepages, compaction_alloc,
-                                               (unsigned long)cc, 0);
+                               (unsigned long)cc, 0,
+                               cc->sync);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
@@ -482,7 +484,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 }
 
 unsigned long compact_zone_order(struct zone *zone,
-                                               int order, gfp_t gfp_mask)
+                                               int order, gfp_t gfp_mask,
+                                               bool sync)
 {
        struct compact_control cc = {
                .nr_freepages = 0,
@@ -490,6 +493,7 @@ unsigned long compact_zone_order(struct zone *zone,
                .order = order,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
+               .sync = sync,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -505,11 +509,13 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-                       int order, gfp_t gfp_mask, nodemask_t *nodemask)
+                       int order, gfp_t gfp_mask, nodemask_t *nodemask,
+                       bool sync)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
@@ -533,7 +539,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                nodemask) {
                int status;
 
-               status = compact_zone_order(zone, order, gfp_mask);
+               status = compact_zone_order(zone, order, gfp_mask, sync);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
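With the flag plumbed all the way through, a caller can make a cheap asynchronous attempt first and fall back to synchronous compaction only if that fails, which is how the commit subject's "avoid synchronous compaction" in the faster path would be realised. A hypothetical sketch (compact_with_fallback() and its retry structure are illustrative, not from this patch; COMPACT_PARTIAL is the existing return code meaning an allocation of the requested order should now succeed):

	/* Illustrative only: async first, sync as a last resort. */
	static unsigned long compact_with_fallback(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask)
	{
		unsigned long rc;

		/* Fast path: never sleep waiting on page locks/writeback. */
		rc = try_to_compact_pages(zonelist, order, gfp_mask,
					  nodemask, false);
		if (rc == COMPACT_PARTIAL)
			return rc;

		/* Slow path: allow migration to block. */
		return try_to_compact_pages(zonelist, order, gfp_mask,
					    nodemask, true);
	}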