]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
memory-hotplug: fix zone stat mismatch
authorMinchan Kim <minchan@kernel.org>
Fri, 28 Sep 2012 00:19:58 +0000 (10:19 +1000)
committerStephen Rothwell <sfr@canb.auug.org.au>
Thu, 4 Oct 2012 05:03:36 +0000 (15:03 +1000)
During memory-hotplug, I found NR_ISOLATED_[ANON|FILE] are increasing,
causing the kernel to hang.  When the system doesn't have enough free
pages, it enters reclaim but never reclaims any pages due to
too_many_isolated()==true and loops forever.

The cause is that when we do memory-hotadd after memory-remove,
__zone_pcp_update() clears a zone's ZONE_STAT_ITEMS in setup_pageset()
although the vm_stat_diff of all CPUs still have values.

In addition, when we offline all pages of the zone, we reset them in
zone_pcp_reset without draining, so we lose some zone stat items.

Reviewed-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/vmstat.h
mm/page_alloc.c
mm/vmstat.c

index a5bb15018b5c4f70d8cfcdc1dad04c6517df393a..92a86b2cce33f5bda884b98c9e3ace0412ed2319 100644 (file)
@@ -198,6 +198,8 @@ extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 void refresh_cpu_vm_stats(int);
 void refresh_zone_stat_thresholds(void);
 
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+
 int calculate_pressure_threshold(struct zone *zone);
 int calculate_normal_threshold(struct zone *zone);
 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
@@ -251,6 +253,8 @@ static inline void __dec_zone_page_state(struct page *page,
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_zone_stat_thresholds(void) { }
 
+static inline void drain_zonestat(struct zone *zone,
+                       struct per_cpu_pageset *pset) { }
 #endif         /* CONFIG_SMP */
 
 static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
index eac9c181d6bea400f5ea911f6dacb0493417cef5..6062f8c613743b5ec874b375485ebb7dc3cf0f49 100644 (file)
@@ -5914,6 +5914,7 @@ static int __meminit __zone_pcp_update(void *data)
                local_irq_save(flags);
                if (pcp->count > 0)
                        free_pcppages_bulk(zone, pcp->count, pcp);
+               drain_zonestat(zone, pset);
                setup_pageset(pset, batch);
                local_irq_restore(flags);
        }
@@ -5930,10 +5931,16 @@ void __meminit zone_pcp_update(struct zone *zone)
 void zone_pcp_reset(struct zone *zone)
 {
        unsigned long flags;
+       int cpu;
+       struct per_cpu_pageset *pset;
 
        /* avoid races with drain_pages()  */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
+               for_each_online_cpu(cpu) {
+                       pset = per_cpu_ptr(zone->pageset, cpu);
+                       drain_zonestat(zone, pset);
+               }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
index 5044b65e15760d2d325070bb156fc9c847889f33..c30af3a2f9601f3aceec37595c39b7643dc5e68e 100644 (file)
@@ -495,6 +495,18 @@ void refresh_cpu_vm_stats(int cpu)
                        atomic_long_add(global_diff[i], &vm_stat[i]);
 }
 
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
+{
+       int i;
+
+       for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+               if (pset->vm_stat_diff[i]) {
+                       int v = pset->vm_stat_diff[i];
+                       pset->vm_stat_diff[i] = 0;
+                       atomic_long_add(v, &zone->vm_stat[i]);
+                       atomic_long_add(v, &vm_stat[i]);
+               }
+}
 #endif
 
 #ifdef CONFIG_NUMA