diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9d925451bfd31d6e6382997f5eb10789ef8da1a..c66fb875104ab99feccb0e116b48b9d54a6011e4 100644 (file)
@@ -1928,6 +1928,17 @@ this_zone_full:
                zlc_active = 0;
                goto zonelist_scan;
        }
+
+       if (page)
+               /*
+                * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+                * necessary to allocate the page. The expectation is
+                * that the caller is taking steps that will free more
+                * memory. The caller should avoid the page being used
+                * for !PFMEMALLOC purposes.
+                */
+               page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
        return page;
 }
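
The hint set above is consumed outside this function. As a rough illustration only (the structure and helper below are hypothetical, not part of this patch), a caller that backs a longer-lived object with a freshly allocated page could record the flag so the object is never reused for ordinary, !PFMEMALLOC work:

#include <linux/mm.h>

/* Illustration only: hypothetical consumer honouring page->pfmemalloc. */
struct my_rx_buffer {
	struct page	*page;
	bool		pfmemalloc;	/* backed by emergency reserves */
};

static void my_rx_buffer_init(struct my_rx_buffer *buf, struct page *page)
{
	buf->page = page;
	/* Propagate the allocator's hint: a reserve-backed page may only
	 * be used by callers that are themselves freeing memory. */
	buf->pfmemalloc = page->pfmemalloc;
}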
 
@@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
        int migratetype, bool sync_migration,
-       bool *deferred_compaction,
+       bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
        struct page *page;
@@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
        current->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-                                               nodemask, sync_migration);
+                                               nodemask, sync_migration,
+                                               contended_compaction);
        current->flags &= ~PF_MEMALLOC;
        if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
        int migratetype, bool sync_migration,
-       bool *deferred_compaction,
+       bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
        return NULL;
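
For context, the new *contended_compaction out-parameter is filled in on the compaction side (mm/compaction.c), which is outside this diff; the cc.contended field comes from the companion compaction change. A simplified sketch of the expected plumbing, not the exact upstream code (gfp/migratetype handling omitted):

#include "internal.h"	/* struct compact_control */

/* Simplified sketch: async compaction records zone-lock contention in its
 * control structure and the wrapper relays it to the page allocator. */
static unsigned long compact_zone_order(struct zone *zone, int order,
					bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.order = order,
		.zone = zone,
		.sync = sync,
	};

	ret = compact_zone(zone, &cc);
	/* Set when the async path gave up rather than spin on a lock. */
	*contended = cc.contended;

	return ret;
}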
@@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned long did_some_progress;
        bool sync_migration = false;
        bool deferred_compaction = false;
+       bool contended_compaction = false;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -2378,18 +2391,17 @@ rebalance:
 
        /* Allocate without watermarks if the context allows */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
+               /*
+                * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
+                * the allocation is high priority and these types of
+                * allocations are system rather than user oriented
+                */
+               zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
                page = __alloc_pages_high_priority(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
                if (page) {
-                       /*
-                        * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
-                        * necessary to allocate the page. The expectation is
-                        * that the caller is taking steps that will free more
-                        * memory. The caller should avoid the page being used
-                        * for !PFMEMALLOC purposes.
-                        */
-                       page->pfmemalloc = true;
                        goto got_pg;
                }
        }
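
The node_zonelist() helper used above lives in include/linux/gfp.h; shown here from memory and slightly simplified, it simply indexes the local node's own zonelists, which is what makes the caller's mempolicy-derived zonelist irrelevant for these emergency allocations:

/* Roughly the helper referenced above (include/linux/gfp.h, simplified):
 * return the node's fallback zonelist, or its no-fallback zonelist when
 * __GFP_THISNODE is set, bypassing any policy zonelist from the caller. */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}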
@@ -2415,6 +2427,7 @@ rebalance:
                                        nodemask,
                                        alloc_flags, preferred_zone,
                                        migratetype, sync_migration,
+                                       &contended_compaction,
                                        &deferred_compaction,
                                        &did_some_progress);
        if (page)
@@ -2424,10 +2437,11 @@ rebalance:
        /*
         * If compaction is deferred for high-order allocations, it is because
         * sync compaction recently failed. If this is the case and the caller
-        * has requested the system not be heavily disrupted, fail the
-        * allocation now instead of entering direct reclaim
+        * requested a movable allocation that does not heavily disrupt the
+        * system then fail the allocation instead of entering direct reclaim.
         */
-       if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+       if ((deferred_compaction || contended_compaction) &&
+                                               (gfp_mask & __GFP_NO_KSWAPD))
                goto nopage;
 
        /* Try direct reclaim and then allocating */
@@ -2498,6 +2512,7 @@ rebalance:
                                        nodemask,
                                        alloc_flags, preferred_zone,
                                        migratetype, sync_migration,
+                                       &contended_compaction,
                                        &deferred_compaction,
                                        &did_some_progress);
                if (page)
@@ -2562,8 +2577,6 @@ retry_cpuset:
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
-       else
-               page->pfmemalloc = false;
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 
@@ -4370,6 +4383,8 @@ void __init set_pageblock_order(void)
  *   - mark all pages reserved
  *   - mark all memory queues empty
  *   - clear the memory bitmaps
+ *
+ * NOTE: pgdat should get zeroed by caller.
  */
 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long *zholes_size)
@@ -4380,9 +4395,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        int ret;
 
        pgdat_resize_init(pgdat);
-       pgdat->nr_zones = 0;
        init_waitqueue_head(&pgdat->kswapd_wait);
-       pgdat->kswapd_max_order = 0;
+       init_waitqueue_head(&pgdat->pfmemalloc_wait);
        pgdat_page_cgroup_init(pgdat);
 
        for (j = 0; j < MAX_NR_ZONES; j++) {
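
The pfmemalloc_wait queue is only initialised here; its users are in the reclaim code, outside this diff. A simplified sketch of the intended pattern (the wrapper names are illustrative and pfmemalloc_watermark_ok() comes from the companion vmscan changes): direct reclaimers without access to reserves throttle on the queue, and kswapd wakes them once the node's free pages recover.

#include <linux/wait.h>
#include <linux/mmzone.h>

/* Illustrative wrappers, not upstream code: throttle a direct reclaimer
 * until kswapd has restored the node's pfmemalloc watermark ... */
static void throttle_on_pfmemalloc(pg_data_t *pgdat)
{
	wait_event_killable(pgdat->pfmemalloc_wait,
			    pfmemalloc_watermark_ok(pgdat));
}

/* ... and let kswapd wake the throttled tasks again. */
static void kswapd_wake_throttled(pg_data_t *pgdat)
{
	if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
	    pfmemalloc_watermark_ok(pgdat))
		wake_up(&pgdat->pfmemalloc_wait);
}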
@@ -4443,11 +4457,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
                zone_pcp_init(zone);
                lruvec_init(&zone->lruvec, zone);
-               zap_zone_vm_stats(zone);
-               zone->flags = 0;
-#ifdef CONFIG_MEMORY_ISOLATION
-               zone->nr_pageblock_isolate = 0;
-#endif
                if (!size)
                        continue;
 
@@ -4507,6 +4516,9 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 {
        pg_data_t *pgdat = NODE_DATA(nid);
 
+       /* pg_data_t should be reset to zero when it's allocated */
+       WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
+
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
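
Together with the NOTE added to free_area_init_core(), this WARN_ON() documents the new contract: the pg_data_t must already be zero-filled when it reaches free_area_init_node(). Boot-time allocations satisfy this automatically; a runtime (hot-add style) caller would have to do the same. A hypothetical, simplified caller, with the arch-specific registration step elided:

#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical hot-add style caller (illustration only). */
static int my_hotadd_node(int nid, unsigned long node_start_pfn,
			  unsigned long *zones_size,
			  unsigned long *zholes_size)
{
	/* Must be zero-filled: free_area_init_core() no longer resets the
	 * individual fields itself. */
	pg_data_t *pgdat = kzalloc(sizeof(*pgdat), GFP_KERNEL);

	if (!pgdat)
		return -ENOMEM;

	/* ... arch-specific step making NODE_DATA(nid) return pgdat ... */

	free_area_init_node(nid, zones_size, node_start_pfn, zholes_size);
	return 0;
}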