From: Minchan Kim
Date: Wed, 26 Sep 2012 01:34:00 +0000 (+1000)
Subject: memory-hotplug: fix zone stat mismatch
X-Git-Tag: next-20120927~1^2~196
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=75e793e5af05b6439875537274de0018a549ac37;p=karo-tx-linux.git

memory-hotplug: fix zone stat mismatch

During memory-hotplug I found that NR_ISOLATED_[ANON|FILE] kept
increasing, causing the kernel to hang.  When the system does not have
enough free pages it enters reclaim, but it never reclaims any pages
because too_many_isolated() is always true, so it loops forever.

The cause is that when we do memory-hotadd after memory-remove,
__zone_pcp_update() clears a zone's ZONE_STAT_ITEMS in setup_pageset()
although the vm_stat_diff of each CPU still holds values.

In addition, when we offline all pages of the zone, we reset them in
zone_pcp_reset() without draining, so we lose some zone stat items.

Reviewed-by: Wen Congyang
Signed-off-by: Minchan Kim
Cc: Kamezawa Hiroyuki
Cc: Yasuaki Ishimatsu
Cc: KOSAKI Motohiro
Signed-off-by: Andrew Morton
---

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a5bb15018b5c..92a86b2cce33 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -198,6 +198,8 @@ extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 void refresh_cpu_vm_stats(int);
 void refresh_zone_stat_thresholds(void);
 
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+
 int calculate_pressure_threshold(struct zone *zone);
 int calculate_normal_threshold(struct zone *zone);
 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
@@ -251,6 +253,8 @@ static inline void __dec_zone_page_state(struct page *page,
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
+static inline void drain_zonestat(struct zone *zone,
+			struct per_cpu_pageset *pset) { }
 #endif		/* CONFIG_SMP */
 
 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e980363d0b5c..d976957cf61b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5905,6 +5905,7 @@ static int __meminit __zone_pcp_update(void *data)
 		local_irq_save(flags);
 		if (pcp->count > 0)
 			free_pcppages_bulk(zone, pcp->count, pcp);
+		drain_zonestat(zone, pset);
 		setup_pageset(pset, batch);
 		local_irq_restore(flags);
 	}
@@ -5921,10 +5922,16 @@ void __meminit zone_pcp_update(struct zone *zone)
 void zone_pcp_reset(struct zone *zone)
 {
 	unsigned long flags;
+	int cpu;
+	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages()  */
 	local_irq_save(flags);
 	if (zone->pageset != &boot_pageset) {
+		for_each_online_cpu(cpu) {
+			pset = per_cpu_ptr(zone->pageset, cpu);
+			drain_zonestat(zone, pset);
+		}
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5044b65e1576..c30af3a2f960 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -495,6 +495,18 @@ void refresh_cpu_vm_stats(int cpu)
 			atomic_long_add(global_diff[i], &vm_stat[i]);
 }
 
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+{
+	int i;
+
+	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+		if (pset->vm_stat_diff[i]) {
+			int v = pset->vm_stat_diff[i];
+			pset->vm_stat_diff[i] = 0;
+			atomic_long_add(v, &zone->vm_stat[i]);
+			atomic_long_add(v, &vm_stat[i]);
+		}
+}
 #endif
 
 #ifdef CONFIG_NUMA
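
To see why skipping the fold loses counts, here is a minimal,
self-contained userspace sketch of the two-level counter scheme the
fix is about; it is not part of the patch, and every name in it
(zone_stat, pcp_diff, mod_stat, drain) is invented for illustration.
In the kernel the corresponding roles are played by zone->vm_stat[],
vm_stat[], pset->vm_stat_diff[] and drain_zonestat().

/*
 * Illustrative only: a global counter plus buffered per-CPU deltas.
 * Zeroing a CPU's delta without folding it first (what the buggy
 * reset paths did) leaves the global counter permanently skewed.
 */
#include <stdio.h>

#define NCPUS 2

static long zone_stat;          /* global counter, e.g. NR_ISOLATED_ANON */
static long pcp_diff[NCPUS];    /* per-CPU deltas not yet folded back */

static void mod_stat(int cpu, long delta)
{
	pcp_diff[cpu] += delta;  /* fast path buffers changes per CPU */
}

static void drain(int cpu)
{
	/* what drain_zonestat() does: fold the delta, then zero it */
	zone_stat += pcp_diff[cpu];
	pcp_diff[cpu] = 0;
}

int main(void)
{
	mod_stat(0, +3);  /* pages isolated on CPU 0 */
	mod_stat(1, -3);  /* the same pages put back on CPU 1 */

	pcp_diff[1] = 0;  /* buggy reset: delta discarded without folding */
	drain(0);
	printf("without draining: %ld (expected 0)\n", zone_stat);
	return 0;
}

The program prints 3 instead of 0: the -3 buffered on CPU 1 is thrown
away, so the global counter can never return to zero.  With
NR_ISOLATED_[ANON|FILE] stuck above zero, too_many_isolated() keeps
returning true and direct reclaim spins, which matches the hang
described in the commit message.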