diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 94c6d8988ab3239019ecc720b7ac0624e06016df..e5c4ca2fc55735dd52a9b51aca44c6667601fd12 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -872,9 +872,11 @@ int write_cache_pages(struct address_space *mapping,
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
+       pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
-       int scanned = 0;
+       pgoff_t done_index;
+       int cycled;
        int range_whole = 0;
 
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
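
For orientation, these are the writeback_control knobs this function consumes; a trimmed sketch of the struct as used here (the full definition lives in include/linux/writeback.h, and unrelated members are omitted):

	struct writeback_control {
		enum writeback_sync_modes sync_mode;	/* WB_SYNC_NONE or WB_SYNC_ALL */
		long nr_to_write;			/* write budget, decremented per page */
		loff_t range_start;			/* byte range, used when !range_cyclic */
		loff_t range_end;
		unsigned nonblocking:1;			/* bail out rather than block */
		unsigned encountered_congestion:1;	/* set when we did bail out */
		unsigned range_cyclic:1;		/* resume from mapping->writeback_index */
		/* other members (for_kupdate, for_reclaim, ...) omitted */
	};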
@@ -884,85 +886,147 @@ int write_cache_pages(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
-               index = mapping->writeback_index; /* Start from prev offset */
+               writeback_index = mapping->writeback_index; /* prev offset */
+               index = writeback_index;
+               if (index == 0)
+                       cycled = 1;
+               else
+                       cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
-               scanned = 1;
+               cycled = 1; /* ignore range_cyclic tests */
        }
 retry:
-       while (!done && (index <= end) &&
-              (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                                             PAGECACHE_TAG_DIRTY,
-                                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
-               unsigned i;
+       done_index = index;
+       while (!done && (index <= end)) {
+               int i;
+
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                             PAGECACHE_TAG_DIRTY,
+                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               if (nr_pages == 0)
+                       break;
 
-               scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
                        /*
-                        * At this point we hold neither mapping->tree_lock nor
-                        * lock on the page itself: the page may be truncated or
-                        * invalidated (changing page->mapping to NULL), or even
-                        * swizzled back from swapper_space to tmpfs file
-                        * mapping
+                        * At this point, the page may be truncated or
+                        * invalidated (changing page->mapping to NULL), or
+                        * even swizzled back from swapper_space to tmpfs file
+                        * mapping. However, page->index will not change
+                        * because we have a reference on the page.
                         */
+                       if (page->index > end) {
+                               /*
+                                * can't be range_cyclic (1st pass) because
+                                * end == -1 in that case.
+                                */
+                               done = 1;
+                               break;
+                       }
+
+                       done_index = page->index + 1;
+
                        lock_page(page);
 
+                       /*
+                        * Page truncated or invalidated. We can freely skip it
+                        * then, even for data integrity operations: the page
+                        * has disappeared concurrently, so there could be no
+                        * real expectation of this data integrity operation
+                        * even if there is now a new, dirty page at the same
+                        * pagecache address.
+                        */
                        if (unlikely(page->mapping != mapping)) {
+continue_unlock:
                                unlock_page(page);
                                continue;
                        }
 
-                       if (!wbc->range_cyclic && page->index > end) {
-                               done = 1;
-                               unlock_page(page);
-                               continue;
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
                        }
 
-                       if (wbc->sync_mode != WB_SYNC_NONE)
-                               wait_on_page_writeback(page);
-
-                       if (PageWriteback(page) ||
-                           !clear_page_dirty_for_io(page)) {
-                               unlock_page(page);
-                               continue;
+                       if (PageWriteback(page)) {
+                               if (wbc->sync_mode != WB_SYNC_NONE)
+                                       wait_on_page_writeback(page);
+                               else
+                                       goto continue_unlock;
                        }
 
+                       BUG_ON(PageWriteback(page));
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
                        ret = (*writepage)(page, wbc, data);
 
-                       if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
-                               unlock_page(page);
-                               ret = 0;
+                       if (unlikely(ret)) {
+                               if (ret == AOP_WRITEPAGE_ACTIVATE) {
+                                       unlock_page(page);
+                                       ret = 0;
+                               } else {
+                                       /*
+                                        * done_index is set past this page,
+                                        * so media errors will not choke
+                                        * background writeout for the entire
+                                        * file. This has consequences for
+                                        * range_cyclic semantics (ie. it may
+                                        * not be suitable for data integrity
+                                        * writeout).
+                                        */
+                                       done = 1;
+                                       break;
+                               }
+                       }
+
+                       if (wbc->nr_to_write > 0) {
+                               wbc->nr_to_write--;
+                               if (wbc->nr_to_write == 0 &&
+                                   wbc->sync_mode == WB_SYNC_NONE) {
+                                       /*
+                                        * We stop writing back only if we are
+                                        * not doing integrity sync. In case of
+                                        * integrity sync we have to keep going
+                                        * because someone may be concurrently
+                                        * dirtying pages, and we might have
+                                        * synced a lot of newly appeared dirty
+                                        * pages, but have not synced all of the
+                                        * old dirty pages.
+                                        */
+                                       done = 1;
+                                       break;
+                               }
                        }
-                       if (ret || (--(wbc->nr_to_write) <= 0))
-                               done = 1;
+
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
+                               break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
-       if (!scanned && !done) {
+       if (!cycled && !done) {
                /*
+                * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
-               scanned = 1;
+               cycled = 1;
                index = 0;
+               end = writeback_index - 1;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-               mapping->writeback_index = index;
+               mapping->writeback_index = done_index;
 
-       if (wbc->range_cont)
-               wbc->range_start = index << PAGE_CACHE_SHIFT;
        return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
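
The contract for callers is unchanged: write_cache_pages() still walks the dirty pages in the mapping and hands each one, locked and cleared for I/O, to the supplied writepage_t callback. A minimal sketch of a filesystem driving it, in the same way generic_writepages() passes __writepage(); the myfs_* names are hypothetical and error handling is elided:

	#include <linux/fs.h>
	#include <linux/writeback.h>

	/* hypothetical callback: each page arrives locked, with its dirty
	 * bit already cleared by clear_page_dirty_for_io() */
	static int myfs_writepage_cb(struct page *page,
				     struct writeback_control *wbc, void *data)
	{
		struct address_space *mapping = data;

		return mapping->a_ops->writepage(page, wbc);
	}

	static int myfs_writepages(struct address_space *mapping,
				   struct writeback_control *wbc)
	{
		return write_cache_pages(mapping, wbc, myfs_writepage_cb, mapping);
	}

Note that on the second (wrapped) pass of a range_cyclic walk, end is clamped to writeback_index - 1, so no page is visited twice in a single invocation.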
@@ -1088,7 +1152,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                if (!mapping)
                        return 1;
 
-               write_lock_irq(&mapping->tree_lock);
+               spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
@@ -1102,7 +1166,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
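
The locking changes in this and the remaining hunks all follow from one conversion: mapping->tree_lock is now a spinlock_t instead of an rwlock_t, so the former read/write lock calls collapse into the plain spin_lock variants. The corresponding declaration change in struct address_space (include/linux/fs.h), sketched with surrounding fields and comments trimmed:

	-	rwlock_t		tree_lock;
	+	spinlock_t		tree_lock;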
@@ -1258,7 +1322,7 @@ int test_clear_page_writeback(struct page *page)
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
@@ -1269,7 +1333,7 @@ int test_clear_page_writeback(struct page *page)
                                __bdi_writeout_inc(bdi);
                        }
                }
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
@@ -1287,7 +1351,7 @@ int test_set_page_writeback(struct page *page)
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
@@ -1300,7 +1364,7 @@ int test_set_page_writeback(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
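
test_set_page_writeback() and test_clear_page_writeback() bracket the actual I/O: the setter runs (usually via the set_page_writeback() wrapper) once a locked page has been cleared dirty, and the clearer runs from end_page_writeback() when the I/O completes. A condensed sketch of that lifecycle, assuming the page was handed over by write_cache_pages() as above (not a drop-in implementation):

	/* in a writepage implementation: page is locked and was just
	 * clear_page_dirty_for_io()'d by write_cache_pages() */
	set_page_writeback(page);	/* wraps test_set_page_writeback() */
	unlock_page(page);
	/* ... submit the page for I/O ... */

	/* in the I/O completion path: */
	end_page_writeback(page);	/* calls test_clear_page_writeback()
					 * and wakes PG_writeback waiters */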