Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
index 71b1c29948dba30aab0a894ddc7c84eb62acde2b..dd984470248fe3a137d211be0789f8aa8446f95a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -195,25 +195,25 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
        unsigned long nr;
 
-       nr = zone_page_state(zone, NR_ACTIVE_FILE) +
-            zone_page_state(zone, NR_INACTIVE_FILE) +
-            zone_page_state(zone, NR_ISOLATED_FILE);
+       nr = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+            zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+            zone_page_state_snapshot(zone, NR_ISOLATED_FILE);
 
        if (get_nr_swap_pages() > 0)
-               nr += zone_page_state(zone, NR_ACTIVE_ANON) +
-                     zone_page_state(zone, NR_INACTIVE_ANON) +
-                     zone_page_state(zone, NR_ISOLATED_ANON);
+               nr += zone_page_state_snapshot(zone, NR_ACTIVE_ANON) +
+                     zone_page_state_snapshot(zone, NR_INACTIVE_ANON) +
+                     zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
 
        return nr;
 }
 
 bool zone_reclaimable(struct zone *zone)
 {
-       return zone_page_state(zone, NR_PAGES_SCANNED) <
+       return zone_page_state_snapshot(zone, NR_PAGES_SCANNED) <
                zone_reclaimable_pages(zone) * 6;
 }
 
-static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        if (!mem_cgroup_disabled())
                return mem_cgroup_get_lru_size(lruvec, lru);
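
The hunk above swaps zone_page_state() for zone_page_state_snapshot() in zone_reclaimable_pages() and zone_reclaimable(). The snapshot variant also folds in the per-cpu deltas that have not yet been flushed into the global vmstat counter, which matters here: on small zones the drift can be large enough that a stale reading keeps zone_reclaimable() returning the wrong answer. A standalone model of the difference (illustrative names and values, not the kernel's):

#include <stdio.h>

#define NR_CPUS 4

static long vm_stat_global = 1000;            /* folded-in total       */
static signed char vm_stat_diff[NR_CPUS] = {  /* pending per-cpu drift */
        3, -2, 5, 1
};

static long page_state(void)
{
        return vm_stat_global;                /* may lag behind */
}

static long page_state_snapshot(void)
{
        long nr = vm_stat_global;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)   /* add unflushed deltas */
                nr += vm_stat_diff[cpu];
        return nr < 0 ? 0 : nr;               /* kernel clamps at zero */
}

int main(void)
{
        printf("raw:      %ld\n", page_state());           /* 1000 */
        printf("snapshot: %ld\n", page_state_snapshot());  /* 1007 */
        return 0;
}
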
@@ -228,14 +228,6 @@ int register_shrinker(struct shrinker *shrinker)
 {
        size_t size = sizeof(*shrinker->nr_deferred);
 
-       /*
-        * If we only have one possible node in the system anyway, save
-        * ourselves the trouble and disable NUMA aware behavior. This way we
-        * will save memory and some small loop time later.
-        */
-       if (nr_node_ids == 1)
-               shrinker->flags &= ~SHRINKER_NUMA_AWARE;
-
        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;
 
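
With the single-node shortcut removed above, a NUMA-aware shrinker keeps its SHRINKER_NUMA_AWARE flag, and its nr_deferred array is simply sized by nr_node_ids, which is 1 on a single-node machine anyway. A standalone sketch of the sizing that remains (simplified stand-in names; the real code allocates with kzalloc):

#include <errno.h>
#include <stdlib.h>

#define SHRINKER_NUMA_AWARE (1 << 0)

struct shrinker {
        unsigned int flags;
        long *nr_deferred;      /* one slot, or one per NUMA node */
};

static int nr_node_ids = 2;     /* stand-in for the kernel global */

static int register_shrinker_sketch(struct shrinker *shrinker)
{
        size_t size = sizeof(*shrinker->nr_deferred);

        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;

        shrinker->nr_deferred = calloc(1, size);  /* kzalloc stand-in */
        if (!shrinker->nr_deferred)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        struct shrinker s = { .flags = SHRINKER_NUMA_AWARE };

        return register_shrinker_sketch(&s);
}
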
@@ -611,12 +603,10 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                            bool reclaimed)
 {
        unsigned long flags;
-       struct mem_cgroup *memcg;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       memcg = mem_cgroup_begin_page_stat(page);
        spin_lock_irqsave(&mapping->tree_lock, flags);
        /*
         * The non racy check for a busy page.
@@ -656,7 +646,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               mem_cgroup_end_page_stat(memcg);
                swapcache_free(swap);
        } else {
                void (*freepage)(struct page *);
@@ -682,9 +671,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                if (reclaimed && page_is_file_cache(page) &&
                    !mapping_exiting(mapping) && !dax_mapping(mapping))
                        shadow = workingset_eviction(mapping, page);
-               __delete_from_page_cache(page, shadow, memcg);
+               __delete_from_page_cache(page, shadow);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               mem_cgroup_end_page_stat(memcg);
 
                if (freepage != NULL)
                        freepage(page);
@@ -694,7 +682,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 cannot_free:
        spin_unlock_irqrestore(&mapping->tree_lock, flags);
-       mem_cgroup_end_page_stat(memcg);
        return 0;
 }
 
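
The __remove_mapping() hunks drop the mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat() bracketing and the memcg argument to __delete_from_page_cache(), consistent with the page lock (asserted by the BUG_ON above) now being enough to stabilize the page's memcg binding on this path. What remains pivots on the "non racy check for a busy page" named in the context: reclaim may only free the page if it can atomically drop the reference count to zero while exactly the expected references are held (the page cache's plus the isolating caller's). A minimal userspace model of that refcount freeze, with illustrative names rather than the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
        atomic_int refcount;
};

static bool ref_freeze(struct fake_page *page, int expected)
{
        int old = expected;

        /* Succeeds only when no one else holds a reference. */
        return atomic_compare_exchange_strong(&page->refcount, &old, 0);
}

int main(void)
{
        struct fake_page busy = { .refcount = 3 };  /* extra user  */
        struct fake_page idle = { .refcount = 2 };  /* cache + us  */

        printf("busy page freezes: %d\n", ref_freeze(&busy, 2)); /* 0 */
        printf("idle page freezes: %d\n", ref_freeze(&idle, 2)); /* 1 */
        return 0;
}

Any concurrent lookup that has already taken a reference makes the compare-and-exchange fail, and the page is left in place.
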
@@ -1931,8 +1918,8 @@ static bool inactive_file_is_low(struct lruvec *lruvec)
        unsigned long inactive;
        unsigned long active;
 
-       inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
-       active = get_lru_size(lruvec, LRU_ACTIVE_FILE);
+       inactive = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
+       active = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
 
        return active > inactive;
 }
@@ -2071,7 +2058,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * system is under heavy pressure.
         */
        if (!inactive_file_is_low(lruvec) &&
-           get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
+           lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
        }
@@ -2097,10 +2084,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * anon in [0], file in [1]
         */
 
-       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
-               get_lru_size(lruvec, LRU_INACTIVE_ANON);
-       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
-               get_lru_size(lruvec, LRU_INACTIVE_FILE);
+       anon  = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
+               lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
+       file  = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
+               lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
 
        spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
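
The check at the end of this hunk guards the recency decay in get_scan_count(): once recent_scanned for the anon LRUs exceeds a quarter of the list, both the scanned and rotated counters are halved (the halving itself sits just outside this hunk's context and is assumed here from the surrounding kernel code), so the scanned/rotated ratio tracks recent behaviour rather than the whole history. A standalone sketch:

#include <stdio.h>

int main(void)
{
        unsigned long anon = 4000;           /* pages on the anon LRUs */
        unsigned long recent_scanned = 1200;
        unsigned long recent_rotated = 900;

        /* Decay once scanning outpaces a quarter of the list. */
        if (recent_scanned > anon / 4) {
                recent_scanned /= 2;
                recent_rotated /= 2;
        }

        printf("scanned %lu rotated %lu\n", recent_scanned, recent_rotated);
        return 0;
}
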
@@ -2138,7 +2125,7 @@ out:
                        unsigned long size;
                        unsigned long scan;
 
-                       size = get_lru_size(lruvec, lru);
+                       size = lruvec_lru_size(lruvec, lru);
                        scan = size >> sc->priority;
 
                        if (!scan && pass && force_scan)
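
The per-LRU scan target in this last hunk is size >> sc->priority, so each pass at a lower priority doubles the scan window until priority 0 covers the whole list. A quick standalone illustration, using the kernel's DEF_PRIORITY of 12:

#include <stdio.h>

#define DEF_PRIORITY 12

int main(void)
{
        unsigned long size = 1UL << 20;  /* e.g. a 1M-page LRU list */
        int priority;

        /* First pass scans 1/4096th of the list; each retry doubles it. */
        for (priority = DEF_PRIORITY; priority >= 0; priority--)
                printf("priority %2d -> scan %lu pages\n",
                       priority, size >> priority);
        return 0;
}
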