diff --git a/mm/truncate.c b/mm/truncate.c
index 9bfb8e85386..0f4b6d18ab0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,6 +51,33 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 	do_invalidatepage(page, partial);
 }
 
+/*
+ * This cancels just the dirty bit on the kernel page itself, it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
+ */
+void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	if (TestClearPageDirty(page)) {
+		struct address_space *mapping = page->mapping;
+		if (mapping && mapping_cap_account_dirty(mapping)) {
+			dec_zone_page_state(page, NR_FILE_DIRTY);
+			if (account_size)
+				task_io_account_cancelled_write(account_size);
+		}
+	}
+}
+EXPORT_SYMBOL(cancel_dirty_page);
+
 /*
  * If truncate cannot remove the fs-private metadata from the page, the page
  * becomes anonymous. It will be left on the LRU and may even be mapped into
@@ -58,7 +85,7 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
  *
  * We need to bale out if page->mapping is no longer equal to the original
  * mapping. This happens a) when the VM reclaimed the page while we waited on
- * its lock, b) when a concurrent invalidate_inode_pages got there first and
+ * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
 static void
@@ -67,11 +94,11 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 {
 	if (page->mapping != mapping)
 		return;
 
+	cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
 	if (PagePrivate(page))
 		do_invalidatepage(page, 0);
-	if (test_clear_page_dirty(page))
-		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
 	remove_from_page_cache(page);
@@ -79,7 +106,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 }
 
 /*
- * This is for invalidate_inode_pages(). That function can be called at
+ * This is for invalidate_mapping_pages(). That function can be called at
  * any time, and is not supposed to throw away dirty pages. But pages can
  * be marked dirty at any time too, so use remove_mapping which safely
  * discards clean, unused pages.
@@ -283,12 +310,7 @@ unlock:
 	}
 	return ret;
 }
-
-unsigned long invalidate_inode_pages(struct address_space *mapping)
-{
-	return invalidate_mapping_pages(mapping, 0, ~0UL);
-}
-EXPORT_SYMBOL(invalidate_inode_pages);
+EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*
  * This is like invalidate_complete_page(), except it ignores the page's
@@ -321,6 +343,15 @@ failed:
 	return 0;
 }
 
+static int do_launder_page(struct address_space *mapping, struct page *page)
+{
+	if (!PageDirty(page))
+		return 0;
+	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+		return 0;
+	return mapping->a_ops->launder_page(page);
+}
+
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping: the address_space
@@ -344,13 +375,12 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	pagevec_init(&pvec, 0);
 	next = start;
-	while (next <= end && !ret && !wrapped &&
+	while (next <= end && !wrapped &&
 		pagevec_lookup(&pvec, mapping, next,
 			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
-		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index;
-			int was_dirty;
 
 			lock_page(page);
 			if (page->mapping != mapping) {
@@ -386,18 +416,14 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				  PAGE_CACHE_SIZE, 0);
 				}
 			}
-			was_dirty = test_clear_page_dirty(page);
-			if (!invalidate_complete_page2(mapping, page)) {
-				if (was_dirty)
-					set_page_dirty(page);
+			ret = do_launder_page(mapping, page);
+			if (ret == 0 && !invalidate_complete_page2(mapping, page))
 				ret = -EIO;
-			}
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-	WARN_ON_ONCE(ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
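
Note on the first hunk: cancel_dirty_page() clears the dirty bit on the
kernel page itself and rolls back the NR_FILE_DIRTY and per-task write
accounting, without touching pte dirty bits in any mmaps. A minimal sketch
of a caller in the fs/buffer.c mold follows; it is illustrative only, the
examplefs_* name is hypothetical, and only cancel_dirty_page(), PageLocked()
and PAGE_CACHE_SIZE come from the patch and the kernel headers of this era.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller, not part of the patch above.  A filesystem that
 * has cleaned out a page's buffers without going through the VM would
 * cancel the page-level dirty bit so the dirty accounting stays
 * balanced.  The page must be locked.
 */
static void examplefs_forget_dirty_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	/*
	 * PAGE_CACHE_SIZE is the amount of previously-accounted write
	 * I/O that will now never happen; pass 0 if it was accounted
	 * some other way.
	 */
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
}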
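
Note on the later hunks: the invalidate_inode_pages() wrapper is gone
(callers now use invalidate_mapping_pages(mapping, 0, ~0UL) directly), and
instead of cancelling and restoring dirty bits around
invalidate_complete_page2(), invalidate_inode_pages2_range() now gives the
filesystem a chance to clean a dirty page through the new ->launder_page()
address_space operation. A sketch of the shape such an operation takes
follows; every examplefs_* identifier is hypothetical, and only the method's
signature is taken from the patch.

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical ->launder_page() implementation, not part of the patch
 * above.  do_launder_page() invokes it with the page locked and still
 * dirty; it must clean the page without unlocking it and return 0 so
 * that invalidate_complete_page2() can drop the page, or return an
 * error to fail the invalidation with that error instead of -EIO.
 */
static int examplefs_launder_page(struct page *page)
{
	/* examplefs_sync_page() stands in for synchronous writeback. */
	return examplefs_sync_page(page);
}

static const struct address_space_operations examplefs_aops = {
	/* ...readpage, writepage, and friends... */
	.launder_page	= examplefs_launder_page,
};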