per-zone and reclaim enhancements for memory controller: calculate active/inactive...
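
In brief, the patch below makes three related changes. struct scan_control grows an nr_io_pages counter: shrink_page_list() bumps it whenever a page is kept on the list because its writeback is still in flight or cannot be started, and do_try_to_free_pages() and balance_pgdat() then call congestion_wait() only when more than swap_cluster_max such pages were seen in a pass, so reclaim of clean memory no longer sleeps needlessly. A scan_global_lru(sc) macro replaces the open-coded sc->mem_cgroup == NULL tests, keeping slab shrinking and the don't-go-OOM decision confined to global (non-cgroup) reclaim. Finally, try_to_free_mem_cgroup_pages() scans only the current node's zonelist instead of iterating over every online node.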
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b7d868cbca0950706284de7b336194341c44e172..be4dfe87be03eef25206710409d9ae4e44143975 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -70,6 +70,13 @@ struct scan_control {
 
        int order;
 
+       /*
+        * Pages that have (or should have) IO pending.  If we run into
+        * a lot of these, we're better off waiting a little for IO to
+        * finish rather than scanning more pages in the VM.
+        */
+       int nr_io_pages;
+
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
@@ -119,6 +126,12 @@ long vm_total_pages;       /* The total number of pages which the VM controls */
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_CGROUP_MEM_CONT
+#define scan_global_lru(sc)    (!(sc)->mem_cgroup)
+#else
+#define scan_global_lru(sc)    (1)
+#endif
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -499,8 +512,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                         */
                        if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
                                wait_on_page_writeback(page);
-                       else
+                       else {
+                               sc->nr_io_pages++;
                                goto keep_locked;
+                       }
                }
 
                referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -539,8 +554,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageDirty(page)) {
                        if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
                                goto keep_locked;
-                       if (!may_enter_fs)
+                       if (!may_enter_fs) {
+                               sc->nr_io_pages++;
                                goto keep_locked;
+                       }
                        if (!sc->may_writepage)
                                goto keep_locked;
 
@@ -551,8 +568,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               if (PageWriteback(page) || PageDirty(page))
+                               if (PageWriteback(page) || PageDirty(page)) {
+                                       sc->nr_io_pages++;
                                        goto keep;
+                               }
                                /*
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
@@ -1259,6 +1278,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
+               sc->nr_io_pages = 0;
                if (!priority)
                        disable_swap_token();
                nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1266,11 +1286,12 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
-               if (sc->mem_cgroup == NULL)
+               if (scan_global_lru(sc)) {
                        shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
-               if (reclaim_state) {
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
-                       reclaim_state->reclaimed_slab = 0;
+                       if (reclaim_state) {
+                               nr_reclaimed += reclaim_state->reclaimed_slab;
+                               reclaim_state->reclaimed_slab = 0;
+                       }
                }
                total_scanned += sc->nr_scanned;
                if (nr_reclaimed >= sc->swap_cluster_max) {
@@ -1292,11 +1313,12 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
                }
 
                /* Take a nap, wait for some writeback to complete */
-               if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+               if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
+                               sc->nr_io_pages > sc->swap_cluster_max)
                        congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
-       if (!sc->all_unreclaimable && sc->mem_cgroup == NULL)
+       if (!sc->all_unreclaimable && scan_global_lru(sc))
                ret = 1;
 out:
        /*
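
The new sleep condition is easier to see in isolation. Previously reclaim napped whenever sc->nr_scanned was nonzero at low priority; now it also requires that more than a batch (swap_cluster_max) of the pages it looked at were stuck behind IO. Below is a self-contained mock of that test (the struct, helper name, and values are stand-ins; DEF_PRIORITY mirrors the kernel's 12):

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY	12

struct scan_control_mock {
	unsigned long nr_scanned;
	int nr_io_pages;
	int swap_cluster_max;
};

/* Mirrors the new test in do_try_to_free_pages()/balance_pgdat(). */
static bool should_wait_for_io(const struct scan_control_mock *sc, int priority)
{
	return sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
	       sc->nr_io_pages > sc->swap_cluster_max;
}

int main(void)
{
	struct scan_control_mock sc = {
		.nr_scanned = 128, .nr_io_pages = 3, .swap_cluster_max = 32,
	};
	/* Few IO-bound pages: keep scanning rather than sleeping. */
	printf("%d\n", should_wait_for_io(&sc, 5));	/* 0 */

	sc.nr_io_pages = 64;
	/* Mostly IO-bound: back off and let writeback catch up. */
	printf("%d\n", should_wait_for_io(&sc, 5));	/* 1 */
	return 0;
}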
@@ -1350,15 +1372,12 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
        };
-       int node;
        struct zone **zones;
        int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
 
-       for_each_online_node(node) {
-               zones = NODE_DATA(node)->node_zonelists[target_zone].zones;
-               if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
-                       return 1;
-       }
+       zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
+       if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
+               return 1;
        return 0;
 }
 #endif
@@ -1424,6 +1443,7 @@ loop_again:
                if (!priority)
                        disable_swap_token();
 
+               sc.nr_io_pages = 0;
                all_zones_ok = 1;
 
                /*
@@ -1516,7 +1536,8 @@ loop_again:
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
-               if (total_scanned && priority < DEF_PRIORITY - 2)
+               if (total_scanned && priority < DEF_PRIORITY - 2 &&
+                                       sc.nr_io_pages > sc.swap_cluster_max)
                        congestion_wait(WRITE, HZ/10);
 
                /*