Workloads that are allocating frequently and writing files place a large number of dirty pages on the LRU.
index fd58e9756dffb20a4420c014de60c8f0ee7ae9d0..6e805feb95da05ecee48480c41c510da84d981ef 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -752,7 +752,9 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
 static unsigned long shrink_page_list(struct list_head *page_list,
                                      struct zone *zone,
                                      struct scan_control *sc,
-                                     int priority)
+                                     int priority,
+                                     unsigned long *ret_nr_dirty,
+                                     unsigned long *ret_nr_writeback)
 {
        LIST_HEAD(ret_pages);
        LIST_HEAD(free_pages);
@@ -760,6 +762,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        unsigned long nr_dirty = 0;
        unsigned long nr_congested = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long nr_writeback = 0;
 
        cond_resched();
 
@@ -796,6 +799,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                if (PageWriteback(page)) {
+                       nr_writeback++;
                        /*
                         * Synchronous reclaim cannot queue pages for
                         * writeback due to the possibility of stack overflow
@@ -1001,6 +1005,8 @@ keep_lumpy:
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
+       *ret_nr_dirty += nr_dirty;
+       *ret_nr_writeback += nr_writeback;
        return nr_reclaimed;
 }
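
A minimal user-space sketch of the out-parameter pattern introduced above (function names and counts are illustrative, not kernel code): each pass adds its totals into caller-owned counters, which is why the patch uses "+=" so the synchronous-stall retry further down accumulates into the same nr_dirty/nr_writeback as the first pass.

#include <stdio.h>

/*
 * Hypothetical stand-in for shrink_page_list(): each pass reports the dirty
 * and writeback pages it encountered by adding into caller-owned counters,
 * so a second (synchronous-stall) pass accumulates into the same totals.
 */
static unsigned long reclaim_pass(unsigned long seen_dirty,
                                  unsigned long seen_writeback,
                                  unsigned long *ret_nr_dirty,
                                  unsigned long *ret_nr_writeback)
{
        *ret_nr_dirty += seen_dirty;            /* "+=", not "=": accumulate */
        *ret_nr_writeback += seen_writeback;
        return 0;                               /* nr_reclaimed, elided here */
}

int main(void)
{
        unsigned long nr_dirty = 0, nr_writeback = 0, nr_reclaimed = 0;

        nr_reclaimed  = reclaim_pass(3, 5, &nr_dirty, &nr_writeback);  /* first pass  */
        nr_reclaimed += reclaim_pass(1, 2, &nr_dirty, &nr_writeback);  /* stall retry */

        printf("dirty=%lu writeback=%lu reclaimed=%lu\n",
               nr_dirty, nr_writeback, nr_reclaimed);
        return 0;
}
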
 
@@ -1467,6 +1473,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
        unsigned long nr_taken;
        unsigned long nr_anon;
        unsigned long nr_file;
+       unsigned long nr_dirty = 0;
+       unsigned long nr_writeback = 0;
        isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1519,12 +1527,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        spin_unlock_irq(&zone->lru_lock);
 
-       nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
+       nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
+                                               &nr_dirty, &nr_writeback);
 
        /* Check if we should synchronously wait for writeback */
        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                set_reclaim_mode(priority, sc, true);
-               nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
+               nr_reclaimed += shrink_page_list(&page_list, zone, sc,
+                                       priority, &nr_dirty, &nr_writeback);
        }
 
        if (!scanning_global_lru(sc))
@@ -1537,6 +1547,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
        putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
 
+       /*
+        * If we have encountered a high number of dirty pages under writeback
+        * then we are reaching the end of the LRU too quickly and global
+        * limits are not enough to throttle processes due to the page
+        * distribution throughout zones. Scale the number of dirty pages that
+        * must be under writeback before being throttled to priority.
+        */
+       if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+               wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+
        trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                zone_idx(zone),
                nr_scanned, nr_reclaimed,
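
Below the excerpt, a worked illustration of the priority-scaled threshold in the new wait_iff_congested() check. DEF_PRIORITY is 12 in the kernel; nr_taken of 32 (SWAP_CLUSTER_MAX) is an assumed example batch size, and the sketch only prints the threshold rather than reproducing kernel behaviour.

#include <stdio.h>

#define DEF_PRIORITY 12   /* kernel default reclaim priority */

int main(void)
{
        unsigned long nr_taken = 32;   /* assumed batch size (SWAP_CLUSTER_MAX) */
        int priority;

        /*
         * The patch stalls in wait_iff_congested() once
         *   nr_writeback >= nr_taken >> (DEF_PRIORITY - priority).
         * On the first reclaim pass (priority == DEF_PRIORITY) every isolated
         * page must be under writeback; as priority falls, progressively fewer
         * writeback pages are enough to trigger the throttle.
         */
        for (priority = DEF_PRIORITY; priority >= 7; priority--)
                printf("priority %2d: throttle when nr_writeback >= %lu\n",
                       priority, nr_taken >> (DEF_PRIORITY - priority));

        return 0;
}
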