From 18e08acfc18dd31cfba493e643c3ae762c88523e Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Fri, 28 Sep 2012 10:19:54 +1000
Subject: [PATCH] mm, numa: reclaim from all nodes within reclaim distance

RECLAIM_DISTANCE represents the distance between nodes at which it is
deemed too costly to allocate from; it's preferred to try to reclaim from
a local zone before falling back to allocating on a remote node with such
a distance.

To do this, zone_reclaim_mode is set if the distance between any two
nodes on the system is greater than this distance.  This, however, ends
up causing the page allocator to reclaim from every zone regardless of
its affinity.

What we really want is to reclaim only from zones that are closer than
RECLAIM_DISTANCE.  This patch adds a nodemask to each node that
represents the set of nodes that are within this distance.  During the
zone iteration, if the local node's bit is set in the nodemask of the
zone's node, then reclaim is attempted; otherwise, the zone is skipped.

Signed-off-by: David Rientjes
Cc: Mel Gorman
Cc: Minchan Kim
Cc: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
---
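For illustration, a minimal userspace sketch of the mechanism follows
(placed here, between the "---" marker and the diff, where git-am ignores
it).  The 4-node distance table, MAX_NUMNODES value, and the
init_reclaim_nodes() helper are hypothetical stand-ins for the kernel's
SLIT-backed node_distance() and for_each_online_node(), and plain
unsigned longs stand in for nodemask_t; only the <= RECLAIM_DISTANCE
population loop and the zone_allows_reclaim() test mirror the hunks below
(RECLAIM_DISTANCE defaults to 30 in include/linux/topology.h).

#include <stdbool.h>
#include <stdio.h>

#define MAX_NUMNODES	 4
#define RECLAIM_DISTANCE 30	/* kernel default, include/linux/topology.h */

/* Hypothetical SLIT-style table: nodes 0/1 and 2/3 form close pairs. */
static const int distance[MAX_NUMNODES][MAX_NUMNODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

/* Stands in for pgdat->reclaim_nodes: bit i set in entry n means node i
 * is allowed to reclaim from node n. */
static unsigned long reclaim_nodes[MAX_NUMNODES];

/* Mirrors the free_area_init_node() hunk: for node nid, record every
 * node whose distance is within RECLAIM_DISTANCE. */
static void init_reclaim_nodes(int nid)
{
	for (int i = 0; i < MAX_NUMNODES; i++)
		if (distance[nid][i] <= RECLAIM_DISTANCE)
			reclaim_nodes[nid] |= 1UL << i;
}

/* Mirrors zone_allows_reclaim(): a zone on "node" may be reclaimed on
 * behalf of "local_node" only if local_node's bit is set in that
 * node's reclaim_nodes mask. */
static bool zone_allows_reclaim(int local_node, int node)
{
	return reclaim_nodes[node] & (1UL << local_node);
}

int main(void)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		init_reclaim_nodes(nid);

	/* Allocation local to node 0: reclaim on 0 and 1, skip 2 and 3. */
	for (int node = 0; node < MAX_NUMNODES; node++)
		printf("local 0 -> node %d: %s\n", node,
		       zone_allows_reclaim(0, node) ? "reclaim" : "skip");
	return 0;
}

With the distances above, the program prints "reclaim" for nodes 0 and 1
and "skip" for nodes 2 and 3, which is the same gating the page allocator
hunk performs before falling into zone_reclaim().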
 include/linux/mmzone.h |  1 +
 mm/page_alloc.c        | 31 ++++++++++++++++++++-----------
 2 files changed, 21 insertions(+), 11 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75cba17a618c..59067c128bee 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -708,6 +708,7 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
 	int node_id;
+	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
 	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d60289b080e7..cc253095d914 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1798,6 +1798,11 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
 	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 }
 
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
+}
+
 #else	/* CONFIG_NUMA */
 
 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
@@ -1818,6 +1823,11 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 static void zlc_clear_zones_full(struct zonelist *zonelist)
 {
 }
+
+static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
+{
+	return true;
+}
 #endif	/* CONFIG_NUMA */
 
 /*
@@ -1902,7 +1912,8 @@ zonelist_scan:
 				did_zlc_setup = 1;
 			}
 
-			if (zone_reclaim_mode == 0)
+			if (zone_reclaim_mode == 0 ||
+			    !zone_allows_reclaim(preferred_zone, zone))
 				goto this_zone_full;
 
 			/*
@@ -3363,21 +3374,13 @@ static void build_zonelists(pg_data_t *pgdat)
 	j = 0;
 
 	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
-		int distance = node_distance(local_node, node);
-
-		/*
-		 * If another node is sufficiently far away then it is better
-		 * to reclaim pages in a zone before going off node.
-		 */
-		if (distance > RECLAIM_DISTANCE)
-			zone_reclaim_mode = 1;
-
 		/*
 		 * We don't want to pressure a particular node.
 		 * So adding penalty to the first node in same
 		 * distance group to make it round-robin.
 		 */
-		if (distance != node_distance(local_node, prev_node))
+		if (node_distance(local_node, node) !=
+		    node_distance(local_node, prev_node))
 			node_load[node] = load;
 
 		prev_node = node;
@@ -4545,12 +4548,18 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 		unsigned long node_start_pfn, unsigned long *zholes_size)
 {
 	pg_data_t *pgdat = NODE_DATA(nid);
+	int i;
 
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
+	for_each_online_node(i)
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+			node_set(i, pgdat->reclaim_nodes);
+			zone_reclaim_mode = 1;
+		}
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
-- 
2.39.5