diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2b4c4a0ada24491f2dbb6b2c84a54b71a633aa8..49f15ef0a99a3f7f78adcd687f3ed54c421b2a0b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                       unsigned long *nr_anon,
                       unsigned long *nr_file)
 {
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        struct zone *zone = mz->zone;
        unsigned int count[NR_LRU_LISTS] = { 0, };
        unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                count[lru] += numpages;
        }
 
+       preempt_disable();
        __count_vm_events(PGDEACTIVATE, nr_active);
 
        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
 
-       reclaim_stat->recent_scanned[0] += *nr_anon;
-       reclaim_stat->recent_scanned[1] += *nr_file;
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+       __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+       preempt_enable();
 }
 
 /*
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_writeback = 0;
        isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        }
+       spin_unlock_irq(&zone->lru_lock);
 
-       if (nr_taken == 0) {
-               spin_unlock_irq(&zone->lru_lock);
+       if (nr_taken == 0)
                return 0;
-       }
 
        update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-       __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-       __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-       spin_unlock_irq(&zone->lru_lock);
-
        nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
                                                &nr_dirty, &nr_writeback);
 
@@ -1569,6 +1565,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        spin_lock_irq(&zone->lru_lock);
 
+       reclaim_stat->recent_scanned[0] += nr_anon;
+       reclaim_stat->recent_scanned[1] += nr_file;
+
        if (current_is_kswapd())
                __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
        __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
@@ -1642,18 +1641,6 @@ static void move_active_pages_to_lru(struct zone *zone,
        unsigned long pgmoved = 0;
        struct page *page;
 
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               list_for_each_entry(page, list, lru) {
-                       if (page_has_private(page) && trylock_page(page)) {
-                               if (page_has_private(page))
-                                       try_to_release_page(page, 0);
-                               unlock_page(page);
-                       }
-               }
-               spin_lock_irq(&zone->lru_lock);
-       }
-
        while (!list_empty(list)) {
                struct lruvec *lruvec;
 
@@ -1703,6 +1690,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
        lru_add_drain();
 
+       reset_reclaim_mode(sc);
+
        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
@@ -1735,6 +1724,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
                        continue;
                }
 
+               if (unlikely(buffer_heads_over_limit)) {
+                       if (page_has_private(page) && trylock_page(page)) {
+                               if (page_has_private(page))
+                                       try_to_release_page(page, 0);
+                               unlock_page(page);
+                       }
+               }
+
                if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
                        nr_rotated += hpage_nr_pages(page);
                        /*
@@ -2198,7 +2195,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
         * If compaction is deferred, reclaim up to a point where
         * compaction will have a chance of success when re-enabled
         */
-       if (compaction_deferred(zone))
+       if (compaction_deferred(zone, sc->order))
                return watermark_ok;
 
        /* If compaction is not ready to start, keep reclaiming */
@@ -2238,6 +2235,14 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
        unsigned long nr_soft_scanned;
        bool aborted_reclaim = false;
 
+       /*
+        * If the number of buffer_heads in the machine exceeds the maximum
+        * allowed level, force direct reclaim to scan the highmem zone as
+        * highmem pages could be pinning lowmem pages storing buffer_heads
+        */
+       if (buffer_heads_over_limit)
+               sc->gfp_mask |= __GFP_HIGHMEM;
+
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
                if (!populated_zone(zone))
@@ -2258,8 +2263,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                                 * Even though compaction is invoked for any
                                 * non-zero order, only frequent costly order
                                 * reclamation is disruptive enough to become a
-                                * noticable problem, like transparent huge page
-                                * allocations.
+                                * noticeable problem, like transparent huge
+                                * page allocations.
                                 */
                                if (compaction_ready(zone, sc)) {
                                        aborted_reclaim = true;
@@ -2340,7 +2345,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        unsigned long writeback_threshold;
        bool aborted_reclaim;
 
-       get_mems_allowed();
        delayacct_freepages_start();
 
        if (global_reclaim(sc))
@@ -2404,7 +2408,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 out:
        delayacct_freepages_end();
-       put_mems_allowed();
 
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
@@ -2727,6 +2730,17 @@ loop_again:
                         */
                        age_active_anon(zone, &sc, priority);
 
+                       /*
+                        * If the number of buffer_heads in the machine
+                        * exceeds the maximum allowed level and this node
+                        * has a highmem zone, force kswapd to reclaim from
+                        * it to relieve lowmem pressure.
+                        */
+                       if (buffer_heads_over_limit && is_highmem_idx(i)) {
+                               end_zone = i;
+                               break;
+                       }
+
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
@@ -2802,7 +2816,8 @@ loop_again:
                                                COMPACT_SKIPPED)
                                testorder = 0;
 
-                       if (!zone_watermark_ok_safe(zone, testorder,
+                       if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
+				    !zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);