diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1d251b5b0a06cda3f33791699ba1823ec7068cd8..eeb3bc9d1d361b6f20821073485f1b8e7c4931d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -94,11 +94,6 @@ struct scan_control {
        nodemask_t      *nodemask;
 };
 
-struct mem_cgroup_zone {
-       struct mem_cgroup *mem_cgroup;
-       struct zone *zone;
-};
-
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 #ifdef ARCH_HAS_PREFETCH
@@ -150,10 +145,10 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        if (!mem_cgroup_disabled())
-               return mem_cgroup_get_lruvec_size(lruvec, lru);
+               return mem_cgroup_get_lru_size(lruvec, lru);
 
        return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
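
The renamed helper is best read as a dispatch: with the memory controller enabled, the per-lruvec counter that memcg maintains is authoritative; otherwise the zone's vmstat counter is used. A minimal user-space sketch of that dispatch follows; lruvec_model, memcg_enabled and friends are illustrative names, not the kernel API.

    #include <stdbool.h>

    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
                    LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
                    LRU_UNEVICTABLE, NR_LRU_LISTS };

    /* Hypothetical stand-in for a per-(memcg, zone) lruvec. */
    struct lruvec_model {
            bool memcg_enabled;                      /* controller active?   */
            unsigned long memcg_size[NR_LRU_LISTS];  /* per-cgroup counters  */
            unsigned long zone_size[NR_LRU_LISTS];   /* global zone counters */
    };

    static unsigned long model_get_lru_size(struct lruvec_model *lv,
                                            enum lru_list lru)
    {
            /* prefer the controller's counter whenever it is maintained */
            if (lv->memcg_enabled)
                    return lv->memcg_size[lru];
            return lv->zone_size[lru];
    }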
@@ -1030,15 +1025,13 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                unsigned long *nr_scanned, struct scan_control *sc,
                isolate_mode_t mode, enum lru_list lru)
 {
-       struct list_head *src;
+       struct list_head *src = &lruvec->lists[lru];
        unsigned long nr_taken = 0;
        unsigned long scan;
-       int file = is_file_lru(lru);
-
-       src = &lruvec->lists[lru];
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
                struct page *page;
+               int nr_pages;
 
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
@@ -1047,9 +1040,10 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                switch (__isolate_lru_page(page, mode)) {
                case 0:
-                       mem_cgroup_lru_del_list(page, lru);
+                       nr_pages = hpage_nr_pages(page);
+                       mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
                        list_move(&page->lru, dst);
-                       nr_taken += hpage_nr_pages(page);
+                       nr_taken += nr_pages;
                        break;
 
                case -EBUSY:
@@ -1063,11 +1057,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        }
 
        *nr_scanned = scan;
-
-       trace_mm_vmscan_lru_isolate(sc->order,
-                       nr_to_scan, scan,
-                       nr_taken,
-                       mode, file);
+       trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+                                   nr_taken, mode, is_file_lru(lru));
        return nr_taken;
 }
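
Across the isolate_lru_pages() hunks above, two things changed: hpage_nr_pages() is read once into nr_pages so the same value feeds both the memcg accounting and nr_taken, and the lruvec's size is decremented at the moment of isolation. A compilable user-space model of that loop shape; node, lru_model and the helper names are illustrative only.

    /* Pull up to nr_to_scan entries off the cold end of a circular list
     * while keeping the list's page count coherent. */
    struct node {
            struct node *prev, *next;
            int nr_pages;                    /* compound entries count > 1 */
    };

    struct lru_model {
            struct node head;                /* head.prev is the oldest entry */
            long size;                       /* pages, not entries */
    };

    static void list_move_to(struct node *n, struct node *dst_head)
    {
            n->prev->next = n->next;         /* unlink */
            n->next->prev = n->prev;
            n->next = dst_head->next;        /* insert after dst_head */
            n->prev = dst_head;
            dst_head->next->prev = n;
            dst_head->next = n;
    }

    static unsigned long model_isolate(struct lru_model *src, struct node *dst,
                                       unsigned long nr_to_scan)
    {
            unsigned long taken = 0, scan;

            for (scan = 0; scan < nr_to_scan && src->head.prev != &src->head;
                 scan++) {
                    struct node *n = src->head.prev;
                    int nr = n->nr_pages;    /* read once, use twice */

                    src->size -= nr;         /* account at isolation time */
                    list_move_to(n, dst);
                    taken += nr;
            }
            return taken;
    }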
 
@@ -1104,15 +1095,16 @@ int isolate_lru_page(struct page *page)
 
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
+               struct lruvec *lruvec;
 
                spin_lock_irq(&zone->lru_lock);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
                if (PageLRU(page)) {
                        int lru = page_lru(page);
-                       ret = 0;
                        get_page(page);
                        ClearPageLRU(page);
-
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
+                       ret = 0;
                }
                spin_unlock_irq(&zone->lru_lock);
        }
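
isolate_lru_page() keeps the classic check-lock-recheck shape: the first PageLRU() test is an unlocked hint, the lruvec is resolved under zone->lru_lock, and only the second test is authoritative. A small pthread-based model of that ordering, with hypothetical names throughout:

    #include <pthread.h>
    #include <stdbool.h>

    struct page_model {
            bool on_lru;                     /* PageLRU() analogue */
            pthread_mutex_t *lru_lock;       /* the zone's lru_lock */
    };

    static int model_isolate_page(struct page_model *page)
    {
            int ret = -1;                    /* -EBUSY analogue */

            if (page->on_lru) {              /* cheap, racy hint */
                    pthread_mutex_lock(page->lru_lock);
                    /* the owning lruvec would be looked up here, under the
                     * lock, before any list manipulation */
                    if (page->on_lru) {      /* authoritative recheck */
                            page->on_lru = false;   /* ClearPageLRU */
                            /* del_page_from_lru_list() analogue */
                            ret = 0;
                    }
                    pthread_mutex_unlock(page->lru_lock);
            }
            return ret;
    }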
@@ -1145,8 +1137,7 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 static noinline_for_stack void
-putback_inactive_pages(struct lruvec *lruvec,
-                      struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
        struct zone *zone = lruvec_zone(lruvec);
@@ -1167,9 +1158,13 @@ putback_inactive_pages(struct lruvec *lruvec,
                        spin_lock_irq(&zone->lru_lock);
                        continue;
                }
+
+               lruvec = mem_cgroup_page_lruvec(page, zone);
+
                SetPageLRU(page);
                lru = page_lru(page);
-               add_page_to_lru_list(zone, page, lru);
+               add_page_to_lru_list(page, lruvec, lru);
+
                if (is_active_lru(lru)) {
                        int file = is_file_lru(lru);
                        int numpages = hpage_nr_pages(page);
@@ -1178,7 +1173,7 @@ putback_inactive_pages(struct lruvec *lruvec,
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
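
The re-lookup inside putback_inactive_pages() is the important part: an isolated page_list can mix pages from many cgroups, so the owning lruvec must be resolved per page, not once per call. A sketch of that shape with hypothetical types:

    struct owner_list {                      /* per-cgroup lruvec analogue */
            long size;
    };

    struct item {
            struct item *next;
            struct owner_list *owner;        /* fixed when the page was charged */
            int nr_pages;
    };

    /* mem_cgroup_page_lruvec() analogue: the batch is not homogeneous,
     * so ownership is derived from the item itself. */
    static struct owner_list *model_page_lruvec(struct item *it)
    {
            return it->owner;
    }

    static void model_putback(struct item *batch)
    {
            while (batch) {
                    struct item *it = batch;

                    batch = it->next;
                    /* re-resolve for every item */
                    model_page_lruvec(it)->size += it->nr_pages;
            }
    }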
@@ -1240,11 +1235,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone,
-                                              nr_scanned);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
                else
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone,
-                                              nr_scanned);
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
        }
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1328,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  * But we had to alter page->flags anyway.
  */
 
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
                                     struct list_head *list,
                                     struct list_head *pages_to_free,
                                     enum lru_list lru)
 {
+       struct zone *zone = lruvec_zone(lruvec);
        unsigned long pgmoved = 0;
        struct page *page;
+       int nr_pages;
 
        while (!list_empty(list)) {
-               struct lruvec *lruvec;
-
                page = lru_to_page(list);
+               lruvec = mem_cgroup_page_lruvec(page, zone);
 
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
 
-               lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+               nr_pages = hpage_nr_pages(page);
+               mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
-               pgmoved += hpage_nr_pages(page);
+               pgmoved += nr_pages;
 
                if (put_page_testzero(page)) {
                        __ClearPageLRU(page);
                        __ClearPageActive(page);
-                       del_page_from_lru_list(zone, page, lru);
+                       del_page_from_lru_list(page, lruvec, lru);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
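
move_active_pages_to_lru() now receives the lruvec and derives the zone once with lruvec_zone(); inside the loop the lruvec is still re-resolved per page, and hpage_nr_pages() is hoisted into nr_pages exactly as in isolate_lru_pages(). The put_page_testzero() tail is the usual last-reference idiom; a compilable model of just that idiom, under the assumption that the object was heap-allocated:

    #include <stdlib.h>

    struct obj {
            int refs;
            int on_list;                     /* PageLRU analogue */
    };

    static int put_testzero(struct obj *o)
    {
            return --o->refs == 0;           /* put_page_testzero() analogue */
    }

    static void model_move_back(struct obj *o)
    {
            o->on_list = 1;                  /* SetPageLRU + list_move */
            if (put_testzero(o)) {
                    /* the isolation reference was the last one: undo the
                     * link and hand the object back to the allocator */
                    o->on_list = 0;          /* __ClearPageLRU + del */
                    free(o);
            }
    }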
@@ -1457,8 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
 
-       move_active_pages_to_lru(zone, &l_active, &l_hold, lru);
-       move_active_pages_to_lru(zone, &l_inactive, &l_hold, lru - LRU_ACTIVE);
+       move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+       move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
@@ -1539,9 +1534,9 @@ static int inactive_file_is_low(struct lruvec *lruvec)
        return inactive_file_is_low_global(lruvec_zone(lruvec));
 }
 
-static int inactive_list_is_low(struct lruvec *lruvec, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
 {
-       if (file)
+       if (is_file_lru(lru))
                return inactive_file_is_low(lruvec);
        else
                return inactive_anon_is_low(lruvec);
@@ -1550,10 +1545,8 @@ static int inactive_list_is_low(struct lruvec *lruvec, int file)
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                 struct lruvec *lruvec, struct scan_control *sc)
 {
-       int file = is_file_lru(lru);
-
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, file))
+               if (inactive_list_is_low(lruvec, lru))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
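
Passing the enum instead of a pre-computed file flag lets every callee recover what it needs with the standard predicates. The layout assumed below mirrors the kernel's lru_list enum of this era; the values are illustrative:

    enum lru_list_m {
            M_INACTIVE_ANON = 0,
            M_ACTIVE_ANON   = 1,
            M_INACTIVE_FILE = 2,
            M_ACTIVE_FILE   = 3,
            M_UNEVICTABLE   = 4,
    };

    static int m_is_file_lru(enum lru_list_m lru)
    {
            return lru == M_INACTIVE_FILE || lru == M_ACTIVE_FILE;
    }

    static int m_is_active_lru(enum lru_list_m lru)
    {
            return lru == M_ACTIVE_ANON || lru == M_ACTIVE_FILE;
    }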
@@ -1613,10 +1606,10 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
                goto out;
        }
 
-       anon  = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
-               get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
-       file  = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
-               get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
+       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+               get_lru_size(lruvec, LRU_INACTIVE_ANON);
+       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+               get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
        if (global_reclaim(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
@@ -1635,7 +1628,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
         * This scanning priority is essentially the inverse of IO cost.
         */
        anon_prio = vmscan_swappiness(sc);
-       file_prio = 200 - vmscan_swappiness(sc);
+       file_prio = 200 - anon_prio;
 
        /*
         * OK, so we have swap space and a fair amount of page cache
@@ -1679,7 +1672,7 @@ out:
                int file = is_file_lru(lru);
                unsigned long scan;
 
-               scan = get_lruvec_size(lruvec, lru);
+               scan = get_lru_size(lruvec, lru);
                if (sc->priority || noswap || !vmscan_swappiness(sc)) {
                        scan >>= sc->priority;
                        if (!scan && force_scan)
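
Writing file_prio = 200 - anon_prio (rather than calling vmscan_swappiness() a second time) makes the invariant explicit: the two priorities always sum to 200. With the default vm.swappiness of 60 the split is 60/140 in favour of file reclaim; at 100 both lists are weighted equally. A one-line check of that invariant:

    #include <assert.h>

    static void check_prio_split(int swappiness)     /* vm.swappiness, 0..100 */
    {
            int anon_prio = swappiness;
            int file_prio = 200 - anon_prio;         /* reuse, no second call */

            assert(anon_prio + file_prio == 200);
    }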
@@ -1748,10 +1741,9 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
         * inactive lists are large enough, continue reclaiming
         */
        pages_for_compaction = (2UL << sc->order);
-       inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
+       inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
        if (nr_swap_pages > 0)
-               inactive_lru_pages += get_lruvec_size(lruvec,
-                                                     LRU_INACTIVE_ANON);
+               inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
        if (sc->nr_reclaimed < pages_for_compaction &&
                        inactive_lru_pages > pages_for_compaction)
                return true;
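
pages_for_compaction = 2UL << sc->order asks for twice the request size as headroom before compaction is worth attempting: an order-9 request (a 2MiB huge page with 4KiB base pages, i.e. 512 pages) wants 2 << 9 = 1024 reclaimable pages. Worked as code:

    static unsigned long pages_for_compaction(int order)
    {
            return 2UL << order;     /* twice the allocation size, in pages */
    }

    /* pages_for_compaction(0) == 2, pages_for_compaction(9) == 1024 */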
@@ -1769,8 +1761,7 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
-                                  struct scan_control *sc)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
@@ -1778,9 +1769,6 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
        unsigned long nr_reclaimed, nr_scanned;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
        struct blk_plug plug;
-       struct lruvec *lruvec;
-
-       lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 
 restart:
        nr_reclaimed = 0;
@@ -1842,12 +1830,10 @@ static void shrink_zone(struct zone *zone, struct scan_control *sc)
 
        memcg = mem_cgroup_iter(root, NULL, &reclaim);
        do {
-               struct mem_cgroup_zone mz = {
-                       .mem_cgroup = memcg,
-                       .zone = zone,
-               };
+               struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+
+               shrink_lruvec(lruvec, sc);
 
-               shrink_mem_cgroup_zone(&mz, sc);
                /*
                 * Limit reclaim has historically picked one memcg and
                 * scanned it with decreasing priority levels until
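
The deleted mem_cgroup_zone wrapper carried a (memcg, zone) pair that already names a unique lruvec, so callers now resolve the lruvec once at the boundary and pass it down. A user-space model of what mem_cgroup_zone_lruvec() conceptually selects; the structures are hypothetical, not the kernel's:

    struct lruvec_m { int lists; };          /* per-(memcg, zone) LRU set */

    struct zone_m {
            struct lruvec_m global_lruvec;   /* used when memcg is disabled */
    };

    struct memcg_m {
            struct lruvec_m *per_zone;       /* one lruvec per zone, indexed */
    };

    static struct lruvec_m *model_zone_lruvec(struct zone_m *z,
                                              struct memcg_m *m, int zone_idx)
    {
            if (!m)                          /* controller disabled */
                    return &z->global_lruvec;
            return &m->per_zone[zone_idx];
    }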
@@ -2172,10 +2158,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
                .priority = 0,
                .target_mem_cgroup = memcg,
        };
-       struct mem_cgroup_zone mz = {
-               .mem_cgroup = memcg,
-               .zone = zone,
-       };
+       struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2191,7 +2174,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
         * will pick up pages from other mem cgroup's as well. We hack
         * the priority and make it zero.
         */
-       shrink_mem_cgroup_zone(&mz, &sc);
+       shrink_lruvec(lruvec, &sc);
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -3263,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
+               lruvec = mem_cgroup_page_lruvec(page, zone);
 
                if (!PageLRU(page) || !PageUnevictable(page))
                        continue;
@@ -3272,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 
                        VM_BUG_ON(PageActive(page));
                        ClearPageUnevictable(page);
-                       __dec_zone_state(zone, NR_UNEVICTABLE);
-                       lruvec = mem_cgroup_lru_move_lists(zone, page,
-                                               LRU_UNEVICTABLE, lru);
-                       list_move(&page->lru, &lruvec->lists[lru]);
-                       __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+                       del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+                       add_page_to_lru_list(page, lruvec, lru);
                        pgrescued++;
                }
        }
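
Replacing the open-coded __dec_zone_state()/__inc_zone_state() pair with del_page_from_lru_list() plus add_page_to_lru_list() keeps the counter updates inside the helpers, so a list move can no longer drift out of sync with its statistics. The shape, as a tiny model with hypothetical names:

    struct counted_list {
            long size;                       /* kept in step by the helpers */
    };

    static void model_del(struct counted_list *l, int nr) { l->size -= nr; }
    static void model_add(struct counted_list *l, int nr) { l->size += nr; }

    static void model_rescue(struct counted_list *unevictable,
                             struct counted_list *inactive, int nr)
    {
            /* one call site, two helpers, no open-coded counters */
            model_del(unevictable, nr);      /* del_page_from_lru_list */
            model_add(inactive, nr);         /* add_page_to_lru_list */
    }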