author    Greg Thelen <gthelen@google.com>
          Wed, 24 Aug 2011 23:47:43 +0000 (09:47 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 13 Sep 2011 06:19:35 +0000 (16:19 +1000)

Both mem_cgroup_charge_statistics() and mem_cgroup_move_account() were
unnecessarily disabling preemption when adjusting per-cpu counters:
    preempt_disable()
    __this_cpu_xxx()
    __this_cpu_yyy()
    preempt_enable()

This change does not disable preemption, and thus a CPU switch is
possible within these routines.  This does not cause a problem because
the total of all cpu counters is summed when reporting stats.  Now both
mem_cgroup_charge_statistics() and mem_cgroup_move_account() look like:
    this_cpu_xxx()
    this_cpu_yyy()
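
For illustration, the difference is in who guarantees that the
read-modify-write stays on one CPU.  A minimal sketch of the two
patterns on a hypothetical per-cpu counter (nr_foo_events and the two
helpers are made up for this example, they are not memcg code):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    DEFINE_PER_CPU(long, nr_foo_events);

    static void count_foo_old_style(void)
    {
            /*
             * __this_cpu_add() assumes the caller has already pinned
             * the task to one CPU, hence the preempt_disable() pair.
             */
            preempt_disable();
            __this_cpu_add(nr_foo_events, 1);
            preempt_enable();
    }

    static void count_foo_new_style(void)
    {
            /*
             * this_cpu_add() is safe against preemption on its own;
             * the architecture (or the generic fallback) makes the
             * whole update hit exactly one CPU's counter.
             */
            this_cpu_add(nr_foo_events, 1);
    }

In both cases consecutive updates may land on different CPUs' counters;
that is harmless here because readers sum over all CPUs (see the sketch
at the end of this patch).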

akpm: this is an optimisation for x86 and a deoptimisation for non-x86.
The non-x86 situation will be fixed as architectures implement their
atomic this_cpu_foo() operations.
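
The asymmetry akpm mentions comes from how this_cpu_*() is implemented.
On x86 it compiles to a single instruction relative to the per-cpu
segment base, which cannot be split by preemption.  Architectures
without such an instruction fall back to a generic macro that protects
the update itself, roughly along these lines (a simplified sketch of
the generic fallback in include/linux/percpu.h, not the exact macro):

    #define generic_this_cpu_add(pcp, val)                            \
    do {                                                               \
            unsigned long flags;                                       \
            /* make the read-modify-write safe on this CPU */          \
            raw_local_irq_save(flags);                                 \
            __this_cpu_add(pcp, val);                                  \
            raw_local_irq_restore(flags);                              \
    } while (0)

So on x86 the patch drops preempt_disable()/preempt_enable() for free,
while other architectures pay for interrupt disabling on every counter
update until they grow native this_cpu_foo() operations.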

Signed-off-by: Greg Thelen <gthelen@google.com>
Reported-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/memcontrol.c

index f367fadefe2f723a04772a6fcb1dde7c8882990a..54b35b35ea04477748cd62104ee3e02d6a7a53b6 100644
@@ -664,26 +664,22 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
                                         bool file, int nr_pages)
 {
-       preempt_disable();
-
        if (file)
-               __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
+               this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
                                nr_pages);
        else
-               __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
+               this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
                                nr_pages);
 
        /* pagein of a big page is an event. So, ignore page size */
-       if (nr_pages > 0)
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
-       else {
-               __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
+       if (nr_pages > 0) {
+               this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
+       } else {
+               this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
                nr_pages = -nr_pages; /* for event */
        }
 
-       __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
-
-       preempt_enable();
+       this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
 }
 
 unsigned long
@@ -2704,10 +2700,8 @@ static int mem_cgroup_move_account(struct page *page,
 
        if (PageCgroupFileMapped(pc)) {
                /* Update mapped_file data for mem_cgroup */
-               preempt_disable();
-               __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
-               preempt_enable();
+               this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
+               this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
        }
        mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
        if (uncharge)
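
For completeness: the reason the possible CPU switch above is harmless
is that nothing reads a single CPU's slot in isolation.  Stat reporting
folds all per-cpu values together, in the spirit of memcontrol.c's
mem_cgroup_read_stat().  A trimmed-down sketch of that summation
pattern (hotplug handling omitted; read_stat_total() is a made-up name):

    static long read_stat_total(struct mem_cgroup *memcg,
                                enum mem_cgroup_stat_index idx)
    {
            long val = 0;
            int cpu;

            /*
             * Sum every CPU's contribution; a charge that was accounted
             * on an unexpected CPU is still counted exactly once.
             */
            for_each_online_cpu(cpu)
                    val += per_cpu(memcg->stat->count[idx], cpu);
            return val;
    }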