diff --git a/mm/vmscan.c b/mm/vmscan.c
index 58270aea669a8e1202ccdf6cc900b26ccb10ba56..bf903b2d198f0820a2d03041b06de25af7a4d1d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -458,7 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * Try to allocate it some swap space here.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!add_to_swap(page))
+                       if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
                }
 #endif /* CONFIG_SWAP */
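
This hunk only adjusts the reclaim call site for the new add_to_swap() prototype, which now takes a gfp_mask so each caller can say how its swap-cache allocation may behave. A minimal sketch of the resulting convention, assuming (as this patch does) that direct reclaim stays with GFP_ATOMIC while the migration path further down uses GFP_KERNEL; "may_sleep" is purely illustrative:

        /* sketch only: "may_sleep" is not a real variable in this patch */
        if (PageAnon(page) && !PageSwapCache(page)) {
                gfp_t gfp_mask = may_sleep ? GFP_KERNEL : GFP_ATOMIC;

                if (!add_to_swap(page, gfp_mask))
                        goto activate_locked;   /* no swap slot / allocation failed */
        }
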
@@ -569,13 +569,43 @@ keep:
 }
 
 #ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+       list_del(&page->lru);
+       if (PageActive(page)) {
+               /*
+                * lru_cache_add_active checks that
+                * the PG_active bit is off.
+                */
+               ClearPageActive(page);
+               lru_cache_add_active(page);
+       } else {
+               lru_cache_add(page);
+       }
+       put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+       struct page *page;
+       struct page *page2;
+       int count = 0;
+
+       list_for_each_entry_safe(page, page2, l, lru) {
+               move_to_lru(page);
+               count++;
+       }
+       return count;
+}
+
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
- *
- * return codes:
- *     0 = complete
- *     1 = retry
  */
 static int swap_page(struct page *page)
 {
@@ -616,7 +646,7 @@ unlock_retry:
        unlock_page(page);
 
 retry:
-       return 1;
+       return -EAGAIN;
 }
 /*
  * migrate_pages
@@ -635,16 +665,19 @@ retry:
  * is only swapping out pages and never touches the second
  * list. The direct migration patchset
  * extends this function to avoid the use of swap.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
  */
-int migrate_pages(struct list_head *l, struct list_head *t)
+int migrate_pages(struct list_head *from, struct list_head *to,
+                 struct list_head *moved, struct list_head *failed)
 {
        int retry;
-       LIST_HEAD(failed);
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
+       int rc;
 
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;
@@ -652,20 +685,26 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 redo:
        retry = 0;
 
-       list_for_each_entry_safe(page, page2, l, lru) {
+       list_for_each_entry_safe(page, page2, from, lru) {
                cond_resched();
 
+               rc = 0;
+               if (page_count(page) == 1)
+                       /* page was freed from under us. So we are done. */
+                       goto next;
+
                /*
                 * Skip locked pages during the first two passes to give the
                 * functions holding the lock time to release the page. Later we
                 * use lock_page() to have a higher chance of acquiring the
                 * lock.
                 */
+               rc = -EAGAIN;
                if (pass > 2)
                        lock_page(page);
                else
                        if (TestSetPageLocked(page))
-                               goto retry_later;
+                               goto next;
 
                /*
                 * Only wait on writeback if we have already done a pass where
@@ -674,18 +713,19 @@ redo:
                if (pass > 0) {
                        wait_on_page_writeback(page);
                } else {
-                       if (PageWriteback(page)) {
-                               unlock_page(page);
-                               goto retry_later;
-                       }
+                       if (PageWriteback(page))
+                               goto unlock_page;
                }
 
+               /*
+                * Anonymous pages must have swap cache references otherwise
+                * the information contained in the page maps cannot be
+                * preserved.
+                */
                if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!add_to_swap(page)) {
-                               unlock_page(page);
-                               list_move(&page->lru, &failed);
-                               nr_failed++;
-                               continue;
+                       if (!add_to_swap(page, GFP_KERNEL)) {
+                               rc = -ENOMEM;
+                               goto unlock_page;
                        }
                }
 
@@ -693,10 +733,23 @@ redo:
                 * Page is properly locked and writeback is complete.
                 * Try to migrate the page.
                 */
-               if (!swap_page(page))
-                       continue;
-retry_later:
-               retry++;
+               rc = swap_page(page);
+               goto next;
+
+unlock_page:
+               unlock_page(page);
+
+next:
+               if (rc == -EAGAIN) {
+                       retry++;
+               } else if (rc) {
+                       /* Permanent failure */
+                       list_move(&page->lru, failed);
+                       nr_failed++;
+               } else {
+                       /* Success */
+                       list_move(&page->lru, moved);
+               }
        }
        if (retry && pass++ < 10)
                goto redo;
@@ -704,11 +757,50 @@ retry_later:
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
-       if (!list_empty(&failed))
-               list_splice(&failed, l);
-
        return nr_failed + retry;
 }
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+       lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+       int rc = 0;
+       struct zone *zone = page_zone(page);
+
+redo:
+       spin_lock_irq(&zone->lru_lock);
+       rc = __isolate_lru_page(page);
+       if (rc == 1) {
+               if (PageActive(page))
+                       del_page_from_active_list(zone, page);
+               else
+                       del_page_from_inactive_list(zone, page);
+       }
+       spin_unlock_irq(&zone->lru_lock);
+       if (rc == 0) {
+               /*
+                * Maybe this page is still waiting for a cpu to drain it
+                * from one of the lru lists?
+                */
+               rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+               if (rc == 0 && PageLRU(page))
+                       goto redo;
+       }
+       return rc;
+}
 #endif
 
 /*
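
Taken together, the hunks above give callers a three-step cycle: isolate pages onto a private list, run them through the new four-list migrate_pages(), then return whatever is still held to the LRU. The sketch below is a rough caller under the assumptions of this patch only; the "to" list is accepted but not yet consumed by the swap-only implementation, and every name other than the three exported functions is illustrative:

        /* illustrative caller, not part of the patch */
        LIST_HEAD(pagelist);
        LIST_HEAD(to);          /* target pages: unused while migration only swaps */
        LIST_HEAD(moved);
        LIST_HEAD(failed);
        int left;

        /* 1) take candidate pages off the LRU onto our private list */
        if (isolate_lru_page(page) == 1)
                list_add_tail(&page->lru, &pagelist);

        /*
         * 2) migrate: -EAGAIN pages are retried for up to ten passes,
         *    the rest end up on "moved" or "failed"; the return value
         *    counts permanent failures plus pages still pending.
         */
        left = migrate_pages(&pagelist, &to, &moved, &failed);

        /* 3) hand every page we still hold back to the LRU */
        putback_lru_pages(&moved);
        putback_lru_pages(&failed);
        putback_lru_pages(&pagelist);   /* pages left over after the retries */
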
@@ -758,48 +850,6 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
        return nr_taken;
 }
 
-static void lru_add_drain_per_cpu(void *dummy)
-{
-       lru_add_drain();
-}
-
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list. Do necessary cache draining if the
- * page is not on the LRU lists yet.
- *
- * Result:
- *  0 = page not on LRU list
- *  1 = page removed from LRU list and added to the specified list.
- * -ENOENT = page is being freed elsewhere.
- */
-int isolate_lru_page(struct page *page)
-{
-       int rc = 0;
-       struct zone *zone = page_zone(page);
-
-redo:
-       spin_lock_irq(&zone->lru_lock);
-       rc = __isolate_lru_page(page);
-       if (rc == 1) {
-               if (PageActive(page))
-                       del_page_from_active_list(zone, page);
-               else
-                       del_page_from_inactive_list(zone, page);
-       }
-       spin_unlock_irq(&zone->lru_lock);
-       if (rc == 0) {
-               /*
-                * Maybe this page is still waiting for a cpu to drain it
-                * from one of the lru lists?
-                */
-               rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
-               if (rc == 0 && PageLRU(page))
-                       goto redo;
-       }
-       return rc;
-}
-
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
@@ -865,40 +915,6 @@ done:
        pagevec_release(&pvec);
 }
 
-static inline void move_to_lru(struct page *page)
-{
-       list_del(&page->lru);
-       if (PageActive(page)) {
-               /*
-                * lru_cache_add_active checks that
-                * the PG_active bit is off.
-                */
-               ClearPageActive(page);
-               lru_cache_add_active(page);
-       } else {
-               lru_cache_add(page);
-       }
-       put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU
- *
- * returns the number of pages put back.
- */
-int putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-       int count = 0;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               move_to_lru(page);
-               count++;
-       }
-       return count;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *