[karo-tx-linux.git] / mm / vmscan.c
index 71f23c0c1090c8f4041b2c738d40c616f4dd9185..0f16ffe8eb67c6fcd0350add4a5a4b6092cb6905 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -464,7 +464,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
-        * If this process is currently in __generic_file_aio_write() against
+        * If this process is currently in __generic_file_write_iter() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
@@ -1573,20 +1573,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                 * If dirty pages are scanned that are not queued for IO, it
                 * implies that flushers are not keeping up. In this case, flag
                 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-                * pages from reclaim context. It will forcibly stall in the
-                * next check.
+                * pages from reclaim context.
                 */
                if (nr_unqueued_dirty == nr_taken)
                        zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
                /*
-                * In addition, if kswapd scans pages marked marked for
-                * immediate reclaim and under writeback (nr_immediate), it
-                * implies that pages are cycling through the LRU faster than
+                * If kswapd scans pages marked for immediate
+                * reclaim and under writeback (nr_immediate), it implies
+                * that pages are cycling through the LRU faster than
                 * they are written so also forcibly stall.
                 */
-               if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
-                   current_may_throttle())
+               if (nr_immediate && current_may_throttle())
                        congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
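
For reference, below is a minimal standalone C sketch of what the second hunk changes in the throttling decision. It is illustrative only, not kernel code: stall_before() and stall_after() are made-up helpers that model the old and new conditions in terms of the counters visible in the diff (nr_unqueued_dirty, nr_taken, nr_immediate) and the result of current_may_throttle().

#include <stdbool.h>
#include <stdio.h>

/*
 * Pre-patch condition: stall if every scanned page was dirty but not
 * queued for IO, or if pages marked for immediate reclaim were still
 * under writeback, provided the current task may throttle.
 */
static bool stall_before(unsigned long nr_unqueued_dirty, unsigned long nr_taken,
                         unsigned long nr_immediate, bool may_throttle)
{
        return (nr_unqueued_dirty == nr_taken || nr_immediate) && may_throttle;
}

/*
 * Post-patch condition: the dirty-but-unqueued case only flags the zone
 * (ZONE_TAIL_LRU_DIRTY) so kswapd starts writeback; the forcible stall
 * is taken solely for the immediate-reclaim-under-writeback case.
 */
static bool stall_after(unsigned long nr_immediate, bool may_throttle)
{
        return nr_immediate && may_throttle;
}

int main(void)
{
        /* 32 pages taken, all dirty and unqueued, none under writeback:
         * the old condition stalled here, the new one does not. */
        printf("before: %d, after: %d\n",
               stall_before(32, 32, 0, true), stall_after(0, true));
        return 0;
}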