diff --git a/mm/compaction.c b/mm/compaction.c
index 0409a4ad6ea1363611d49269ecbb5ef88afe5c87..949198d012602002ca75fd47a03845e571eca53e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -634,22 +634,6 @@ isolate_freepages_range(struct compact_control *cc,
        return pfn;
 }
 
-/* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
-{
-       struct page *page;
-       unsigned int count[2] = { 0, };
-
-       if (list_empty(&cc->migratepages))
-               return;
-
-       list_for_each_entry(page, &cc->migratepages, lru)
-               count[!!page_is_file_cache(page)]++;
-
-       mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, count[0]);
-       mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, count[1]);
-}
-
 /* Similar to reclaim, but different enough that they don't share logic */
 static bool too_many_isolated(struct zone *zone)
 {
@@ -834,6 +818,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                    page_count(page) > page_mapcount(page))
                        goto isolate_fail;
 
+               /*
+                * Only allow migration of anonymous pages in GFP_NOFS
+                * context, because those do not depend on fs locks.
+                */
+               if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
+                       goto isolate_fail;
+
                /* If we already hold the lock, we can skip some rechecking */
                if (!locked) {
                        locked = compact_trylock_irqsave(zone_lru_lock(zone),
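
The new check leans on page_mapping() returning NULL for anonymous pages, whose mapping pointer is tagged rather than pointing at an address_space, so a GFP_NOFS caller can safely migrate them without touching fs locks. Below is a minimal userspace sketch of that convention; the struct, the MAPPING_ANON/GFP_FS values, and the must_skip() helper are illustrative stand-ins only (the kernel's page_mapping() also handles swapcache and movable pages):

    #include <stdio.h>

    #define MAPPING_ANON 0x1UL   /* stand-in for PAGE_MAPPING_ANON */
    #define GFP_FS       0x80UL  /* stand-in for __GFP_FS */

    struct page { unsigned long mapping; };

    /* Anon pages tag the mapping pointer, so no address_space (and
     * hence no fs lock) is ever reachable from them. */
    static const void *page_mapping(const struct page *page)
    {
        if (page->mapping & MAPPING_ANON)
            return NULL;
        return (const void *)page->mapping;
    }

    /* Mirrors: if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) */
    static int must_skip(unsigned long gfp_mask, const struct page *page)
    {
        return !(gfp_mask & GFP_FS) && page_mapping(page) != NULL;
    }

    int main(void)
    {
        struct page anon = { 0x1000 | MAPPING_ANON };
        struct page file = { 0x2000 };

        printf("NOFS skips anon page: %d\n", must_skip(0, &anon)); /* 0 */
        printf("NOFS skips file page: %d\n", must_skip(0, &file)); /* 1 */
        return 0;
    }
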
@@ -866,6 +857,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                /* Successfully isolated */
                del_page_from_lru_list(page, lruvec, page_lru(page));
+               inc_node_page_state(page,
+                               NR_ISOLATED_ANON + page_is_file_cache(page));
 
 isolate_success:
                list_add(&page->lru, &cc->migratepages);
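
This inc_node_page_state() call replaces the batched pass that the removed acct_isolated() used to make over cc->migratepages: each page is now accounted the moment it is isolated, which is why the call sites below can simply drop acct_isolated(). The indexing works because NR_ISOLATED_FILE immediately follows NR_ISOLATED_ANON in the node stat enum, so adding the 0/1 result of page_is_file_cache() to the base item selects the right counter. A toy sketch with hypothetical stand-ins for the stat array and helpers:

    #include <stdio.h>

    /* Stand-in for the kernel's node stat items; the trick requires
     * the file counter to come right after the anon one. */
    enum node_stat_item { NR_ISOLATED_ANON, NR_ISOLATED_FILE, NR_ITEMS };

    static unsigned long node_stat[NR_ITEMS];

    /* 1 for a file-backed page, 0 for anonymous (stand-in). */
    static int page_is_file_cache(int file_backed) { return !!file_backed; }

    static void account_isolated(int file_backed)
    {
        node_stat[NR_ISOLATED_ANON + page_is_file_cache(file_backed)]++;
    }

    int main(void)
    {
        account_isolated(0);  /* one anon page isolated */
        account_isolated(1);  /* two file pages isolated */
        account_isolated(1);

        printf("anon: %lu, file: %lu\n",
               node_stat[NR_ISOLATED_ANON], node_stat[NR_ISOLATED_FILE]);
        return 0;
    }
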
@@ -902,7 +895,6 @@ isolate_fail:
                                spin_unlock_irqrestore(zone_lru_lock(zone), flags);
                                locked = false;
                        }
-                       acct_isolated(zone, cc);
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        cc->last_migrated_pfn = 0;
@@ -988,7 +980,6 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
        }
-       acct_isolated(cc->zone, cc);
 
        return pfn;
 }
@@ -1258,10 +1249,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                low_pfn = isolate_migratepages_block(cc, low_pfn,
                                                block_end_pfn, isolate_mode);
 
-               if (!low_pfn || cc->contended) {
-                       acct_isolated(zone, cc);
+               if (!low_pfn || cc->contended)
                        return ISOLATE_ABORT;
-               }
 
                /*
                 * Either we isolated something and proceed with migration. Or
@@ -1271,7 +1260,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                break;
        }
 
-       acct_isolated(zone, cc);
        /* Record where migration scanner will be restarted. */
        cc->migrate_pfn = low_pfn;
 
@@ -1696,14 +1684,16 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                unsigned int alloc_flags, const struct alloc_context *ac,
                enum compact_priority prio)
 {
-       int may_enter_fs = gfp_mask & __GFP_FS;
        int may_perform_io = gfp_mask & __GFP_IO;
        struct zoneref *z;
        struct zone *zone;
        enum compact_result rc = COMPACT_SKIPPED;
 
-       /* Check if the GFP flags allow compaction */
-       if (!may_enter_fs || !may_perform_io)
+       /*
+        * Check if the GFP flags allow compaction - GFP_NOIO is a really
+        * tricky context because migration might require IO.
+        */
+       if (!may_perform_io)
                return COMPACT_SKIPPED;
 
        trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
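
With this hunk, only __GFP_IO gates direct compaction as a whole: a GFP_NOFS caller is now let in and restricted to anonymous pages by the isolation check above, while GFP_NOIO still bails out because migration may need to write pages. A compilable sketch of the resulting decision table, using illustrative flag values rather than the real gfp.h masks:

    #include <stdio.h>

    #define GFP_IO 0x40u  /* stand-in for __GFP_IO */
    #define GFP_FS 0x80u  /* stand-in for __GFP_FS */

    static const char *verdict(unsigned int gfp_mask)
    {
        if (!(gfp_mask & GFP_IO))
            return "COMPACT_SKIPPED";
        if (!(gfp_mask & GFP_FS))
            return "compact anonymous pages only";
        return "compact all movable pages";
    }

    int main(void)
    {
        printf("GFP_KERNEL-like: %s\n", verdict(GFP_IO | GFP_FS));
        printf("GFP_NOFS-like:   %s\n", verdict(GFP_IO));
        printf("GFP_NOIO-like:   %s\n", verdict(0));
        return 0;
    }
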
@@ -1770,6 +1760,7 @@ static void compact_node(int nid)
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .whole_zone = true,
+               .gfp_mask = GFP_KERNEL,
        };
 
 
@@ -1895,6 +1886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                .classzone_idx = pgdat->kcompactd_classzone_idx,
                .mode = MIGRATE_SYNC_LIGHT,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
 
        };
        trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
@@ -2043,33 +2035,38 @@ void kcompactd_stop(int nid)
  * away, we get changed to run anywhere: as the first one comes back,
  * restore their cpu bindings.
  */
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                       void *hcpu)
+static int kcompactd_cpu_online(unsigned int cpu)
 {
        int nid;
 
-       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-               for_each_node_state(nid, N_MEMORY) {
-                       pg_data_t *pgdat = NODE_DATA(nid);
-                       const struct cpumask *mask;
+       for_each_node_state(nid, N_MEMORY) {
+               pg_data_t *pgdat = NODE_DATA(nid);
+               const struct cpumask *mask;
 
-                       mask = cpumask_of_node(pgdat->node_id);
+               mask = cpumask_of_node(pgdat->node_id);
 
-                       if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-                               /* One of our CPUs online: restore mask */
-                               set_cpus_allowed_ptr(pgdat->kcompactd, mask);
-               }
+               if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+                       /* One of our CPUs online: restore mask */
+                       set_cpus_allowed_ptr(pgdat->kcompactd, mask);
        }
-       return NOTIFY_OK;
+       return 0;
 }
 
 static int __init kcompactd_init(void)
 {
        int nid;
+       int ret;
+
+       ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                       "mm/compaction:online",
+                                       kcompactd_cpu_online, NULL);
+       if (ret < 0) {
+               pr_err("kcompactd: failed to register hotplug callbacks.\n");
+               return ret;
+       }
 
        for_each_node_state(nid, N_MEMORY)
                kcompactd_run(nid);
-       hotcpu_notifier(cpu_callback, 0);
        return 0;
 }
 subsys_initcall(kcompactd_init)
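
The final hunk converts kcompactd from the old hotcpu_notifier() interface to the cpu hotplug state machine. A minimal kernel-style sketch of the same registration pattern follows (kernel context, not a standalone program; the example names are hypothetical): CPUHP_AP_ONLINE_DYN allocates a dynamic state, the callback runs whenever a CPU comes online, and the NULL teardown means nothing runs on offline.

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    /* Runs as each CPU comes online; returning nonzero would fail the
     * hotplug transition. */
    static int example_cpu_online(unsigned int cpu)
    {
        pr_info("example: cpu %u is online\n", cpu);
        return 0;
    }

    static int __init example_init(void)
    {
        int ret;

        /* Dynamic state: no fixed slot in enum cpuhp_state is needed.
         * The _nocalls variant registers the callback without invoking
         * it for CPUs that are already online. */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                        "example/subsys:online",
                                        example_cpu_online,
                                        NULL /* no offline callback */);
        if (ret < 0)
            return ret;
        return 0;
    }
    subsys_initcall(example_init)

Unlike the notifier, the state machine invokes the callback uniformly for normal and frozen (suspend/resume) onlining, which is why the deleted CPU_ONLINE_FROZEN special-casing disappears.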