From 3345296adb7d162c05b846259970323f0b830251 Mon Sep 17 00:00:00 2001
From: Greg Thelen
Date: Thu, 25 Aug 2011 09:47:43 +1000
Subject: [PATCH] memcg: remove unneeded preempt_disable

Both mem_cgroup_charge_statistics() and mem_cgroup_move_account() were
unnecessarily disabling preemption when adjusting per-cpu counters:
    preempt_disable()
    __this_cpu_xxx()
    __this_cpu_yyy()
    preempt_enable()

This change does not disable preemption and thus CPU switch is possible
within these routines.  This does not cause a problem because the total
of all cpu counters is summed when reporting stats.  Now both
mem_cgroup_charge_statistics() and mem_cgroup_move_account() look like:
    this_cpu_xxx()
    this_cpu_yyy()

akpm: this is an optimisation for x86 and a deoptimisation for non-x86.
The non-x86 situation will be fixed as architectures implement their
atomic this_cpu_foo() operations.

Signed-off-by: Greg Thelen
Reported-by: KAMEZAWA Hiroyuki
Acked-by: KAMEZAWA Hiroyuki
Cc: Johannes Weiner
Cc: Valdis Kletnieks
Cc: Balbir Singh
Cc: Daisuke Nishimura
Signed-off-by: Andrew Morton
---
 mm/memcontrol.c | 24 +++++++++---------------
 1 file changed, 9 insertions(+), 15 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f367fadefe2f..54b35b35ea04 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -664,26 +664,22 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 					 bool file, int nr_pages)
 {
-	preempt_disable();
-
 	if (file)
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+		this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 				nr_pages);
 	else
-		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+		this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 				nr_pages);
 
 	/* pagein of a big page is an event. So, ignore page size */
-	if (nr_pages > 0)
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
-	else {
-		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+	if (nr_pages > 0) {
+		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+	} else {
+		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
 		nr_pages = -nr_pages; /* for event */
 	}
 
-	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
-
-	preempt_enable();
+	this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
 }
 
 unsigned long
@@ -2704,10 +2700,8 @@ static int mem_cgroup_move_account(struct page *page,
 
 	if (PageCgroupFileMapped(pc)) {
 		/* Update mapped_file data for mem_cgroup */
-		preempt_disable();
-		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-		preempt_enable();
+		this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+		this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 	}
 	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
 	if (uncharge)
--
2.39.5
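
A note on why dropping the explicit preempt pair is safe, for readers
less familiar with the per-cpu API: the sketch below contrasts the two
idioms the patch trades between.  It is a simplified illustration, not
the exact include/linux/percpu.h macros of any particular release, and
the demo_* helper names are hypothetical.

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    /*
     * Old idiom: __this_cpu_add() assumes the caller already prevents
     * migration, so a group of updates shares one preempt pair.
     */
    static void demo_old_style(long __percpu *a, long __percpu *b)
    {
    	preempt_disable();		/* one pair covers both updates */
    	__this_cpu_add(*a, 1);
    	__this_cpu_add(*b, 1);
    	preempt_enable();
    }

    /*
     * New idiom: this_cpu_add() is safe to call with preemption
     * enabled.  On x86 it compiles to a single gs-prefixed add, so the
     * read-modify-write cannot be split by a CPU switch.  The generic
     * fallback of that era expanded to roughly
     *
     *	preempt_disable();
     *	__this_cpu_add(pcp, val);
     *	preempt_enable();
     *
     * per operation, which is why akpm calls this an optimisation for
     * x86 (no preempt twiddling at all) and, until architectures grow
     * atomic this_cpu ops, a small loss elsewhere (two pairs where the
     * old code used one).
     */
    static void demo_new_style(long __percpu *a, long __percpu *b)
    {
    	this_cpu_add(*a, 1);	/* may run on one CPU ...            */
    	this_cpu_add(*b, 1);	/* ... and this on another; per-cpu
    				 * counts are summed at read time, so
    				 * the reported total is unaffected.  */
    }

This is exactly the situation in mem_cgroup_charge_statistics(): each
counter update is independently correct on whichever CPU it lands, and
only the sum across CPUs is ever reported.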