/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>,
 *            Seiji Kihara <kihara@osrg.net>.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"

#define NILFS_BUFFER_INHERENT_BITS \
        ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
         (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))

static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
                       int blkbits, unsigned long b_state)
{
        unsigned long first_block;
        struct buffer_head *bh;

        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << blkbits, b_state);

        first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
        bh = nilfs_page_get_nth_block(page, block - first_block);

        touch_buffer(bh);
        wait_on_buffer(bh);
        return bh;
}

struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
                                      unsigned long b_state)
{
        int blkbits = inode->i_blkbits;
        pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
        struct page *page;
        struct buffer_head *bh;

        page = grab_cache_page(mapping, index);
        if (unlikely(!page))
                return NULL;

        bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
        if (unlikely(!bh)) {
                unlock_page(page);
                put_page(page);
                return NULL;
        }
        return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
        struct page *page = bh->b_page;
        const unsigned long clear_bits =
                (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
                 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
                 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);

        lock_buffer(bh);
        set_mask_bits(&bh->b_state, clear_bits, 0);
        if (nilfs_page_buffers_clean(page))
                __nilfs_clear_page_dirty(page);

        bh->b_blocknr = -1;
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        unlock_buffer(bh);
        brelse(bh);
}

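/*
 * Illustrative usage (a sketch, not code from this file): a caller that
 * decides a dirty block will never be written out (e.g. it has been
 * deallocated) hands its buffer reference to nilfs_forget_buffer(), which
 * cancels the dirty state and drops the reference, so no further brelse()
 * is needed:
 *
 *      bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
 *      if (bh) {
 *              unlock_page(bh->b_page);
 *              put_page(bh->b_page);
 *              ...
 *              nilfs_forget_buffer(bh);        // consumes the bh reference
 *      }
 */
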
/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
        void *kaddr0, *kaddr1;
        unsigned long bits;
        struct page *spage = sbh->b_page, *dpage = dbh->b_page;
        struct buffer_head *bh;

        kaddr0 = kmap_atomic(spage);
        kaddr1 = kmap_atomic(dpage);
        memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
        kunmap_atomic(kaddr1);
        kunmap_atomic(kaddr0);

        dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
        dbh->b_blocknr = sbh->b_blocknr;
        dbh->b_bdev = sbh->b_bdev;

        bh = dbh;
        bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
        while ((bh = bh->b_this_page) != dbh) {
                lock_buffer(bh);
                bits &= bh->b_state;
                unlock_buffer(bh);
        }
        if (bits & (1UL << BH_Uptodate))
                SetPageUptodate(dpage);
        else
                ClearPageUptodate(dpage);
        if (bits & (1UL << BH_Mapped))
                SetPageMappedToDisk(dpage);
        else
                ClearPageMappedToDisk(dpage);
}

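/*
 * Illustrative sketch (names outside this file, such as "shadow_mapping",
 * are only indicative): duplicating one block into another page cache,
 * e.g. when freezing a metadata block into a shadow mapping, is just
 * "grab a destination buffer, then copy data plus inheritable flags":
 *
 *      dbh = nilfs_grab_buffer(inode, shadow_mapping, blkoff, 0);
 *      if (dbh) {
 *              nilfs_copy_buffer(dbh, sbh);    // data, blocknr, bdev, flags
 *              unlock_page(dbh->b_page);
 *              put_page(dbh->b_page);
 *              brelse(dbh);
 *      }
 */
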
/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
        struct buffer_head *bh, *head;

        bh = head = page_buffers(page);
        do {
                if (buffer_dirty(bh))
                        return 0;
                bh = bh->b_this_page;
        } while (bh != head);
        return 1;
}

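/*
 * Example of the inverted return convention documented above: a non-zero
 * result means "all buffers clean", so the page-level dirty flag may be
 * dropped.  This is exactly how nilfs_forget_buffer() above uses it:
 *
 *      if (nilfs_page_buffers_clean(page))
 *              __nilfs_clear_page_dirty(page);
 */
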
void nilfs_page_bug(struct page *page)
{
        struct address_space *m;
        unsigned long ino;

        if (unlikely(!page)) {
                printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
                return;
        }

        m = page->mapping;
        ino = m ? m->host->i_ino : 0;

        printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
               "mapping=%p ino=%lu\n",
               page, page_ref_count(page),
               (unsigned long long)page->index, page->flags, m, ino);

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                int i = 0;

                bh = head = page_buffers(page);
                do {
                        printk(KERN_CRIT
                               " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
                               i++, bh, atomic_read(&bh->b_count),
                               (unsigned long long)bh->b_blocknr, bh->b_state);
                        bh = bh->b_this_page;
                } while (bh != head);
        }
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * should be handled by the caller.  The page must not be under i/o.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
        struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
        unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

        BUG_ON(PageWriteback(dst));

        sbh = sbufs = page_buffers(src);
        if (!page_has_buffers(dst))
                create_empty_buffers(dst, sbh->b_size, 0);

        if (copy_dirty)
                mask |= (1UL << BH_Dirty);

        dbh = dbufs = page_buffers(dst);
        do {
                lock_buffer(sbh);
                lock_buffer(dbh);
                dbh->b_state = sbh->b_state & mask;
                dbh->b_blocknr = sbh->b_blocknr;
                dbh->b_bdev = sbh->b_bdev;
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);

        copy_highpage(dst, src);

        if (PageUptodate(src) && !PageUptodate(dst))
                SetPageUptodate(dst);
        else if (!PageUptodate(src) && PageUptodate(dst))
                ClearPageUptodate(dst);
        if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
                SetPageMappedToDisk(dst);
        else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
                ClearPageMappedToDisk(dst);

        do {
                unlock_buffer(sbh);
                unlock_buffer(dbh);
                sbh = sbh->b_this_page;
                dbh = dbh->b_this_page;
        } while (dbh != dbufs);
}

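/*
 * A minimal sketch of the calling convention stated above (it mirrors
 * nilfs_copy_dirty_pages() below): both pages are locked around the copy,
 * and dirtying of @dst is left entirely to the caller:
 *
 *      lock_page(page);
 *      dpage = grab_cache_page(dmap, page->index);     // returns locked page
 *      nilfs_copy_page(dpage, page, 1);
 *      __set_page_dirty_nobuffers(dpage);              // caller handles dirty
 *      unlock_page(dpage);
 *      put_page(dpage);
 *      unlock_page(page);
 */
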
int nilfs_copy_dirty_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;
        int err = 0;

        pagevec_init(&pvec, 0);
repeat:
        if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
                                PAGEVEC_SIZE))
                return 0;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;

                lock_page(page);
                if (unlikely(!PageDirty(page)))
                        NILFS_PAGE_BUG(page, "inconsistent dirty state");

                dpage = grab_cache_page(dmap, page->index);
                if (unlikely(!dpage)) {
                        /* No empty page is added to the page cache */
                        err = -ENOMEM;
                        unlock_page(page);
                        break;
                }
                if (unlikely(!page_has_buffers(page)))
                        NILFS_PAGE_BUG(page,
                                       "found empty page in dat page cache");

                nilfs_copy_page(dpage, page, 1);
                __set_page_dirty_nobuffers(dpage);

                unlock_page(dpage);
                put_page(dpage);
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        if (likely(!err))
                goto repeat;
        return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
{
        struct pagevec pvec;
        unsigned int i, n;
        pgoff_t index = 0;
        int err;

        pagevec_init(&pvec, 0);
repeat:
        n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
        if (!n)
                return;
        index = pvec.pages[n - 1]->index + 1;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct page *page = pvec.pages[i], *dpage;
                pgoff_t offset = page->index;

                lock_page(page);
                dpage = find_lock_page(dmap, offset);
                if (dpage) {
                        /* override existing page on the destination cache */
                        WARN_ON(PageDirty(dpage));
                        nilfs_copy_page(dpage, page, 0);
                        unlock_page(dpage);
                        put_page(dpage);
                } else {
                        struct page *page2;

                        /* move the page to the destination cache */
                        spin_lock_irq(&smap->tree_lock);
                        page2 = radix_tree_delete(&smap->page_tree, offset);
                        WARN_ON(page2 != page);

                        smap->nrpages--;
                        spin_unlock_irq(&smap->tree_lock);

                        spin_lock_irq(&dmap->tree_lock);
                        err = radix_tree_insert(&dmap->page_tree, offset, page);
                        if (unlikely(err < 0)) {
                                WARN_ON(err == -EEXIST);
                                page->mapping = NULL;
                                put_page(page); /* for cache */
                        } else {
                                page->mapping = dmap;
                                dmap->nrpages++;
                                if (PageDirty(page))
                                        radix_tree_tag_set(&dmap->page_tree,
                                                           offset,
                                                           PAGECACHE_TAG_DIRTY);
                        }
                        spin_unlock_irq(&dmap->tree_lock);
                }
                unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();

        goto repeat;
}

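/*
 * Illustrative sketch (identifiers outside this file, e.g. "shadow_mapping",
 * are only indicative): the shadow-cache mechanism pairs this function with
 * nilfs_copy_dirty_pages().  Dirty pages are first mirrored into a shadow
 * mapping, and on rollback they are copied or moved back:
 *
 *      err = nilfs_copy_dirty_pages(shadow_mapping, inode->i_mapping);
 *      ...                             // work on the original cache
 *      nilfs_copy_back_pages(inode->i_mapping, shadow_mapping);
 *
 * The caller must guarantee that nothing is inserted into either cache while
 * the copy-back runs, as noted in the comment above.
 */
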
/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
        struct pagevec pvec;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);

        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                  PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        nilfs_clear_dirty_page(page, silent);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;

        BUG_ON(!PageLocked(page));

        if (!silent) {
                nilfs_warning(sb, __func__,
                              "discard page: offset %lld, ino %lu",
                              page_offset(page), inode->i_ino);
        }

        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);

        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                const unsigned long clear_bits =
                        (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
                         1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
                         1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);

                bh = head = page_buffers(page);
                do {
                        lock_buffer(bh);
                        if (!silent) {
                                nilfs_warning(sb, __func__,
                                              "discard block %llu, size %zu",
                                              (u64)bh->b_blocknr, bh->b_size);
                        }
                        set_mask_bits(&bh->b_state, clear_bits, 0);
                        unlock_buffer(bh);
                } while (bh = bh->b_this_page, bh != head);
        }

        __nilfs_clear_page_dirty(page);
}

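/*
 * Illustrative usage (a sketch, not code from this file): when pending
 * changes of a metadata inode have to be abandoned, all remaining dirty
 * pages of its mapping can be thrown away; @silent only controls whether
 * each discarded page and block is reported:
 *
 *      nilfs_clear_dirty_pages(inode->i_mapping, true);   // quiet discard
 *      nilfs_clear_dirty_pages(inode->i_mapping, false);  // warn per page/block
 */
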
unsigned nilfs_page_count_clean_buffers(struct page *page,
                                        unsigned from, unsigned to)
{
        unsigned block_start, block_end;
        struct buffer_head *bh, *head;
        unsigned nc = 0;

        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
                block_end = block_start + bh->b_size;
                if (block_end > from && block_start < to && !buffer_dirty(bh))
                        nc++;
        }
        return nc;
}

void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
        mapping->host = inode;
        mapping->flags = 0;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        mapping->private_data = NULL;
        mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (mapping) {
                spin_lock_irq(&mapping->tree_lock);
                if (test_bit(PG_dirty, &page->flags)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                             page_index(page),
                                             PAGECACHE_TAG_DIRTY);
                        spin_unlock_irq(&mapping->tree_lock);
                        return clear_page_dirty_for_io(page);
                }
                spin_unlock_irq(&mapping->tree_lock);
                return 0;
        }
        return TestClearPageDirty(page);
}

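/*
 * Illustrative path for case 2) above (both callers are in this file):
 * when a B-tree operation disposes a dirty buffer, the dirty state is
 * cancelled buffer-first, then page-wide once no dirty buffer remains:
 *
 *      nilfs_forget_buffer(bh);
 *        -> set_mask_bits(&bh->b_state, clear_bits, 0);
 *        -> if (nilfs_page_buffers_clean(page))
 *                   __nilfs_clear_page_dirty(page);
 */
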
/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
                                            sector_t start_blk,
                                            sector_t *blkoff)
{
        unsigned int i;
        pgoff_t index;
        unsigned int nblocks_in_page;
        unsigned long length = 0;
        sector_t b;
        struct pagevec pvec;
        struct page *page;

        if (inode->i_mapping->nrpages == 0)
                return 0;

        index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
        nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

        pagevec_init(&pvec, 0);

repeat:
        pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
                                        pvec.pages);
        if (pvec.nr == 0)
                return length;

        if (length > 0 && pvec.pages[0]->index > index)
                goto out;

        b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
        i = 0;
        do {
                page = pvec.pages[i];

                lock_page(page);
                if (page_has_buffers(page)) {
                        struct buffer_head *bh, *head;

                        bh = head = page_buffers(page);
                        do {
                                if (b < start_blk)
                                        continue;
                                if (buffer_delay(bh)) {
                                        if (length == 0)
                                                *blkoff = b;
                                        length++;
                                } else if (length > 0) {
                                        goto out_locked;
                                }
                        } while (++b, bh = bh->b_this_page, bh != head);
                } else {
                        if (length > 0)
                                goto out_locked;

                        b += nblocks_in_page;
                }
                unlock_page(page);

        } while (++i < pagevec_count(&pvec));

        index = page->index + 1;
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;

out_locked:
        unlock_page(page);
out:
        pagevec_release(&pvec);
        return length;
}
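
/*
 * Illustrative sketch (not code from this file): scanning forward over the
 * delayed-allocation extents of an inode, consuming the (start, length)
 * pairs the function reports:
 *
 *      sector_t blkoff, start = 0;
 *      unsigned long n;
 *
 *      while ((n = nilfs_find_uncommitted_extent(inode, start, &blkoff)) > 0) {
 *              // blkoff .. blkoff + n - 1 are "delayed" (uncommitted) blocks
 *              start = blkoff + n;
 *      }
 */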