mm: search from free_area_cache for the bigger size
[mv-sheeva.git] / mm / vmscan.c
index c52b23552659af5bce0038dd6fa7ac10d044be40..87e4d6a6dc11f50e1d96a5aeea8400305c4408fb 100644
@@ -1138,7 +1138,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  * @mz:                The mem_cgroup_zone to pull pages from.
  * @dst:       The temp list to put pages on to.
  * @nr_scanned:        The number of pages that were scanned.
- * @order:     The caller's attempted allocation order
+ * @sc:                The scan_control struct for this reclaim session
  * @mode:      One of the LRU isolation modes
  * @active:    True [1] if isolating active pages
  * @file:      True [1] if isolating file [!anon] pages
@@ -1147,8 +1147,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct mem_cgroup_zone *mz, struct list_head *dst,
-               unsigned long *nr_scanned, int order, isolate_mode_t mode,
-               int active, int file)
+               unsigned long *nr_scanned, struct scan_control *sc,
+               isolate_mode_t mode, int active, int file)
 {
        struct lruvec *lruvec;
        struct list_head *src;
@@ -1194,7 +1194,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        BUG();
                }
 
-               if (!order)
+               if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
                        continue;
 
                /*
@@ -1208,8 +1208,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 */
                zone_id = page_zone_id(page);
                page_pfn = page_to_pfn(page);
-               pfn = page_pfn & ~((1 << order) - 1);
-               end_pfn = pfn + (1 << order);
+               pfn = page_pfn & ~((1 << sc->order) - 1);
+               end_pfn = pfn + (1 << sc->order);
                for (; pfn < end_pfn; pfn++) {
                        struct page *cursor_page;
 
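For context on the two hunks above: lumpy reclaim only expands the scan when sc->order is non-zero and RECLAIM_MODE_LUMPYRECLAIM is set, and it then walks every page in the order-aligned block around the page just taken. The mask arithmetic rounds the pfn down to the start of its 2^order-page block and derives the exclusive end of that block. A minimal standalone sketch of the same arithmetic (the function and variable names here are illustrative, not the kernel's):

#include <stdio.h>

/* Round a page frame number down to the start of its 2^order block and
 * compute the exclusive end of that block, mirroring the mask arithmetic
 * in the hunk above. */
static void order_block(unsigned long page_pfn, int order,
                        unsigned long *start, unsigned long *end)
{
        *start = page_pfn & ~((1UL << order) - 1);
        *end   = *start + (1UL << order);
}

int main(void)
{
        unsigned long start, end;

        order_block(1234567, 4, &start, &end);             /* order-4 = 16 pages */
        printf("order-4 block: [%lu, %lu)\n", start, end); /* [1234560, 1234576) */
        return 0;
}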
@@ -1275,7 +1275,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
        *nr_scanned = scan;
 
-       trace_mm_vmscan_lru_isolate(order,
+       trace_mm_vmscan_lru_isolate(sc->order,
                        nr_to_scan, scan,
                        nr_taken,
                        nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
@@ -1509,7 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_file;
        unsigned long nr_dirty = 0;
        unsigned long nr_writeback = 0;
-       isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
+       isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1522,20 +1522,19 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        set_reclaim_mode(priority, sc, false);
        if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
-               reclaim_mode |= ISOLATE_ACTIVE;
+               isolate_mode |= ISOLATE_ACTIVE;
 
        lru_add_drain();
 
        if (!sc->may_unmap)
-               reclaim_mode |= ISOLATE_UNMAPPED;
+               isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
-               reclaim_mode |= ISOLATE_CLEAN;
+               isolate_mode |= ISOLATE_CLEAN;
 
        spin_lock_irq(&zone->lru_lock);
 
-       nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list,
-                                    &nr_scanned, sc->order,
-                                    reclaim_mode, 0, file);
+       nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
+                                    sc, isolate_mode, 0, file);
        if (global_reclaim(sc)) {
                zone->pages_scanned += nr_scanned;
                if (current_is_kswapd())
@@ -1699,21 +1698,20 @@ static void shrink_active_list(unsigned long nr_to_scan,
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        unsigned long nr_rotated = 0;
-       isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
+       isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
        struct zone *zone = mz->zone;
 
        lru_add_drain();
 
        if (!sc->may_unmap)
-               reclaim_mode |= ISOLATE_UNMAPPED;
+               isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
-               reclaim_mode |= ISOLATE_CLEAN;
+               isolate_mode |= ISOLATE_CLEAN;
 
        spin_lock_irq(&zone->lru_lock);
 
-       nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold,
-                                    &nr_scanned, sc->order,
-                                    reclaim_mode, 1, file);
+       nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
+                                    isolate_mode, 1, file);
        if (global_reclaim(sc))
                zone->pages_scanned += nr_scanned;
 
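The two renamed hunks above (shrink_inactive_list and shrink_active_list) build the isolate_mode_t flags handed to isolate_lru_pages(): a scan that is not allowed to unmap pages asks only for already-unmapped ones, and one that may not write pages back asks only for clean ones. A hedged sketch of that flag composition, using stand-in bit values rather than the kernel's isolate_mode_t definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in bit values for the example only. */
#define EX_ISOLATE_INACTIVE  0x1
#define EX_ISOLATE_ACTIVE    0x2
#define EX_ISOLATE_CLEAN     0x4
#define EX_ISOLATE_UNMAPPED  0x8

/* Translate scan constraints into isolation flags, mirroring the hunks
 * above: pages the scan cannot handle are simply never isolated. */
static unsigned int build_isolate_mode(bool may_unmap, bool may_writepage,
                                       bool lumpy)
{
        unsigned int mode = EX_ISOLATE_INACTIVE;

        if (lumpy)
                mode |= EX_ISOLATE_ACTIVE;
        if (!may_unmap)
                mode |= EX_ISOLATE_UNMAPPED;
        if (!may_writepage)
                mode |= EX_ISOLATE_CLEAN;
        return mode;
}

int main(void)
{
        /* A scan that may neither unmap nor write back pages. */
        printf("mode: %#x\n", build_isolate_mode(false, false, false)); /* 0xd */
        return 0;
}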
@@ -2112,7 +2110,12 @@ restart:
                 * with multiple processes reclaiming pages, the total
                 * freeing target can get unreasonably large.
                 */
-               if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+               if (nr_reclaimed >= nr_to_reclaim)
+                       nr_to_reclaim = 0;
+               else
+                       nr_to_reclaim -= nr_reclaimed;
+
+               if (!nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
        blk_finish_plug(&plug);
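The hunk above tracks the remaining reclaim target explicitly: the unsigned target is reduced by the pages reclaimed so far and clamped at zero rather than being allowed to wrap, and the loop exits once nothing remains (unless scanning at DEF_PRIORITY). The clamp matters because both values are unsigned. A small sketch of that saturating-decrement pattern (illustrative names, not the kernel loop):

#include <stdio.h>

/* Reduce an unsigned target by the amount reclaimed, clamping at zero
 * rather than wrapping around. */
static unsigned long consume_target(unsigned long target, unsigned long done)
{
        return (done >= target) ? 0 : target - done;
}

int main(void)
{
        unsigned long target = 32;

        target = consume_target(target, 20);   /* 12 pages still to reclaim */
        target = consume_target(target, 20);   /* clamped to 0, no wrap */
        printf("remaining target: %lu\n", target);
        return 0;
}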
@@ -2195,7 +2198,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
         * If compaction is deferred, reclaim up to a point where
         * compaction will have a chance of success when re-enabled
         */
-       if (compaction_deferred(zone))
+       if (compaction_deferred(zone, sc->order))
                return watermark_ok;
 
        /* If compaction is not ready to start, keep reclaiming */
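Passing sc->order into compaction_deferred() above reflects that deferral is tracked per zone together with an order threshold: after compaction fails at some order, only requests at or above that order are deferred, while smaller requests still get to try. A rough sketch of that kind of order-aware deferral check; the struct and field names below are invented for illustration, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-zone deferral state. */
struct zone_defer_state {
        unsigned int considered;   /* attempts seen since the last failure */
        unsigned int defer_limit;  /* attempts to skip before retrying     */
        int failed_order;          /* lowest order that recently failed    */
};

static bool defer_compaction_for(struct zone_defer_state *z, int order)
{
        if (order < z->failed_order)
                return false;              /* smaller requests may still succeed */
        if (++z->considered >= z->defer_limit) {
                z->considered = 0;
                return false;              /* give compaction another try */
        }
        return true;
}

int main(void)
{
        struct zone_defer_state z = {
                .considered = 0, .defer_limit = 4, .failed_order = 3,
        };

        printf("order-2 deferred? %d\n", defer_compaction_for(&z, 2)); /* 0 */
        printf("order-4 deferred? %d\n", defer_compaction_for(&z, 4)); /* 1 */
        return 0;
}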
@@ -2753,7 +2756,7 @@ loop_again:
                 */
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
-                       int nr_slab;
+                       int nr_slab, testorder;
                        unsigned long balance_gap;
 
                        if (!populated_zone(zone))
@@ -2786,7 +2789,20 @@ loop_again:
                                (zone->present_pages +
                                        KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
-                       if (!zone_watermark_ok_safe(zone, order,
+                       /*
+                        * Kswapd reclaims only single pages with compaction
+                        * enabled. Trying too hard to reclaim until contiguous
+                        * free pages have become available can hurt performance
+                        * by evicting too much useful data from memory.
+                        * Do not reclaim more than needed for compaction.
+                        */
+                       testorder = order;
+                       if (COMPACTION_BUILD && order &&
+                                       compaction_suitable(zone, order) !=
+                                               COMPACT_SKIPPED)
+                               testorder = 0;
+
+                       if (!zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
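The testorder hunk above encodes a simple decision for kswapd: when compaction is compiled in, the request is high-order, and the zone already has enough free memory for compaction to run (compaction_suitable() is not COMPACT_SKIPPED), it is enough to balance the zone for order-0 and let compaction assemble the contiguous pages. A condensed sketch of that decision; the predicate below is a placeholder standing in for the real compaction_suitable() test:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for "compaction_suitable(zone, order) != COMPACT_SKIPPED":
 * pretend the zone has enough free pages for compaction to run. */
static bool compaction_has_enough_memory(int order)
{
        return order > 0;   /* stand-in value for the example */
}

/* Pick the order to test against the zone watermark: fall back to order-0
 * when compaction can take over producing the contiguous pages. */
static int watermark_test_order(bool compaction_build, int order)
{
        if (compaction_build && order && compaction_has_enough_memory(order))
                return 0;
        return order;
}

int main(void)
{
        printf("order-9 request tests order %d\n", watermark_test_order(true, 9)); /* 0 */
        printf("order-0 request tests order %d\n", watermark_test_order(true, 0)); /* 0 */
        return 0;
}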
@@ -2815,7 +2831,7 @@ loop_again:
                                continue;
                        }
 
-                       if (!zone_watermark_ok_safe(zone, order,
+                       if (!zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
                                /*
@@ -2903,6 +2919,8 @@ out:
         * and it is potentially going to sleep here.
         */
        if (order) {
+               int zones_need_compaction = 1;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
@@ -2912,6 +2930,10 @@ out:
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
+                       /* Would compaction fail due to lack of free memory? */
+                       if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
+                               goto loop_again;
+
                        /* Confirm the zone is balanced for order-0 */
                        if (!zone_watermark_ok(zone, 0,
                                        high_wmark_pages(zone), 0, 0)) {
@@ -2919,11 +2941,17 @@ out:
                                goto loop_again;
                        }
 
+                       /* Check if the memory needs to be defragmented. */
+                       if (zone_watermark_ok(zone, order,
+                                   low_wmark_pages(zone), *classzone_idx, 0))
+                               zones_need_compaction = 0;
+
                        /* If balanced, clear the congested flag */
                        zone_clear_flag(zone, ZONE_CONGESTED);
-                       if (i <= *classzone_idx)
-                               balanced += zone->present_pages;
                }
+
+               if (zones_need_compaction)
+                       compact_pgdat(pgdat, order);
        }
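The final hunk decides, once the node is balanced for order-0, whether kswapd should compact it: if compaction would be skipped for lack of free memory the whole balancing loop restarts, and otherwise compact_pgdat() runs only when no zone already meets its low watermark at the requested order. A toy sketch of that aggregation over zones, with made-up types standing in for struct zone and zone_watermark_ok():

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a zone plus the result of zone_watermark_ok() at the
 * requested order. */
struct toy_zone {
        bool order_ok;   /* already has a free block of the wanted order */
};

/* True when no populated zone has a suitable free block, i.e. the node
 * still needs to be defragmented (mirrors zones_need_compaction above). */
static bool node_needs_compaction(const struct toy_zone *zones, int nr)
{
        bool need = true;
        int i;

        for (i = 0; i < nr; i++) {
                if (zones[i].order_ok)
                        need = false;   /* same effect as zones_need_compaction = 0 */
        }
        return need;
}

int main(void)
{
        struct toy_zone zones[] = { { .order_ok = false }, { .order_ok = false } };

        printf("compact node? %d\n", node_needs_compaction(zones, 2)); /* 1 */
        zones[1].order_ok = true;
        printf("compact node? %d\n", node_needs_compaction(zones, 2)); /* 0 */
        return 0;
}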
 
        /*