mm: compaction: minimise the time IRQs are disabled while isolating pages for migration
author    Andrea Arcangeli <aarcange@redhat.com>
          Tue, 22 Mar 2011 23:33:10 +0000 (16:33 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 23 Mar 2011 00:44:05 +0000 (17:44 -0700)
compaction_alloc() isolates pages for migration in isolate_migratepages().
While it is scanning, IRQs are disabled on the mistaken assumption that the
scan will be short.  Tests show this to be true for the most part, but
contention times on the LRU lock can increase.  Before this patch, the
IRQ-disabled times for a simple test looked like:

  Total sampled time IRQs off (not real total time): 5493
  Event shrink_inactive_list..shrink_zone                  1596 us count 1
  Event shrink_inactive_list..shrink_zone                  1530 us count 1
  Event shrink_inactive_list..shrink_zone                   956 us count 1
  Event shrink_inactive_list..shrink_zone                   541 us count 1
  Event shrink_inactive_list..shrink_zone                   531 us count 1
  Event split_huge_page..add_to_swap                        232 us count 1
  Event save_args..call_softirq                              36 us count 1
  Event save_args..call_softirq                              35 us count 2
  Event __wake_up..__wake_up                                  1 us count 1

This patch reduces the worst-case IRQ-disabled latencies by releasing the
lock every SWAP_CLUSTER_MAX pages scanned and yielding the CPU if necessary.
The cost is that the process performing compaction runs slower, but leaving
IRQs disabled for too long has worse consequences, as the following report
shows:

  Total sampled time IRQs off (not real total time): 4367
  Event shrink_inactive_list..shrink_zone                   881 us count 1
  Event shrink_inactive_list..shrink_zone                   875 us count 1
  Event shrink_inactive_list..shrink_zone                   868 us count 1
  Event shrink_inactive_list..shrink_zone                   555 us count 1
  Event split_huge_page..add_to_swap                        495 us count 1
  Event compact_zone..compact_zone_order                    269 us count 1
  Event split_huge_page..add_to_swap                        266 us count 1
  Event shrink_inactive_list..shrink_zone                    85 us count 1
  Event save_args..call_softirq                              36 us count 2
  Event __wake_up..__wake_up                                  1 us count 1

[akpm@linux-foundation.org: simplify with s/unlocked/locked/]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Arthur Marsh <arthur.marsh@internode.on.net>
Cc: Clemens Ladisch <cladisch@googlemail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/compaction.c b/mm/compaction.c
index b27802e04b916e1dfc081e779b38f9ed5ac7487e..021a2960ef9e18d061c972590f7651a35e7652ce 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -277,9 +277,27 @@ static unsigned long isolate_migratepages(struct zone *zone,
        }
 
        /* Time to isolate some pages for migration */
+       cond_resched();
        spin_lock_irq(&zone->lru_lock);
        for (; low_pfn < end_pfn; low_pfn++) {
                struct page *page;
+               bool locked = true;
+
+               /* give a chance to irqs before checking need_resched() */
+               if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       locked = false;
+               }
+               if (need_resched() || spin_is_contended(&zone->lru_lock)) {
+                       if (locked)
+                               spin_unlock_irq(&zone->lru_lock);
+                       cond_resched();
+                       spin_lock_irq(&zone->lru_lock);
+                       if (fatal_signal_pending(current))
+                               break;
+               } else if (!locked)
+                       spin_lock_irq(&zone->lru_lock);
+
                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;
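
The lock-amortisation pattern in the hunk above is not kernel-specific.
Below is a minimal user-space sketch of the same idea, assuming a pthread
mutex in place of zone->lru_lock and sched_yield() in place of
cond_resched(); CLUSTER_MAX, scan_one_page() and scan_range() are
hypothetical names introduced for illustration, not part of the patch.

  #include <pthread.h>
  #include <sched.h>

  #define CLUSTER_MAX 32  /* plays the role of SWAP_CLUSTER_MAX */

  static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

  /* placeholder for the per-page isolation work */
  static void scan_one_page(unsigned long pfn)
  {
          (void)pfn;
  }

  static void scan_range(unsigned long low_pfn, unsigned long end_pfn)
  {
          pthread_mutex_lock(&lru_lock);
          for (; low_pfn < end_pfn; low_pfn++) {
                  /*
                   * Periodically drop the lock so that waiters (the
                   * analogue of IRQs and LRU lock contenders) get a
                   * chance to run, then reacquire it and continue.
                   */
                  if (!((low_pfn + 1) % CLUSTER_MAX)) {
                          pthread_mutex_unlock(&lru_lock);
                          sched_yield();
                          pthread_mutex_lock(&lru_lock);
                  }
                  scan_one_page(low_pfn);
          }
          pthread_mutex_unlock(&lru_lock);
  }

  int main(void)
  {
          scan_range(0, 1024);
          return 0;
  }

The kernel version additionally checks need_resched() and
spin_is_contended() before deciding whether to yield; the sketch keeps only
the periodic unlock/relock, which is what bounds how long the lock is held
continuously.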