/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_sub(n, &root->blocks_count);
}
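/*
 * Note: block usage is accounted twice above: in i_blocks of the VFS
 * inode (via inode_add_bytes()/inode_sub_bytes()) and in the per-root
 * blocks_count, which lets each mounted checkpoint report its own
 * block usage.
 */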
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty_sync(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
                                                      to proper value */
        } else if (ret == -ENOENT) {
                /* not found is not error (e.g. hole); must return without
                   the mapped state flag. */
                ;
        } else { /* error */
                err = ret;
        }

 out:
        return err;
}
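/*
 * A block inserted by the create path above is initially mapped to
 * disk block number 0 with BH_Delay set; the real block address is
 * assigned later, when the segment constructor writes the log.
 */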
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}
/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                nilfs_clear_dirty_pages(mapping, false);
                return -EROFS;
        }

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                /*
                 * It means that filesystem was remounted in read-only
                 * mode because of error or metadata corruption. But we
                 * have dirty pages that try to be flushed in background.
                 * So, here we simply discard this dirty page.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}
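/*
 * NILFS writes dirty pages to disk only through the log writer; the
 * ->writepage() above therefore redirties the page and merely kicks
 * (or, for WB_SYNC_ALL, synchronously runs) segment construction.
 */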
static int nilfs_set_page_dirty(struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);

        if (page_has_buffers(page)) {
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;

                /*
                 * This page is locked by callers, and no other thread
                 * concurrently marks its buffers dirty since they are
                 * only dirtied through routines in fs/buffer.c in
                 * which call sites of mark_buffer_dirty are protected
                 * by page lock.
                 */
                bh = head = page_buffers(page);
                do {
                        /* Do not mark hole blocks dirty */
                        if (buffer_dirty(bh) || !buffer_mapped(bh))
                                continue;

                        set_buffer_dirty(bh);
                        nr_dirty++;
                } while (bh = bh->b_this_page, bh != head);

                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
        } else if (ret) {
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}
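/*
 * The dirty buffer counts collected above feed ns_ndirtyblks via
 * nilfs_set_file_dirty(); the transaction code compares that counter
 * against a watermark when deciding whether to kick the segment
 * constructor.
 */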
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                nilfs_truncate(inode);
        }
}
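/*
 * Each buffered write is bracketed by a transaction: nilfs_write_begin()
 * opens one and nilfs_write_end() commits it; on failure the transaction
 * is aborted after nilfs_write_failed() has unwound the partial write.
 */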
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                nilfs_write_failed(mapping, pos + len);
                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}
static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}
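/*
 * Returning zero from ->direct_IO() for a write request makes the VFS
 * fall back to the buffered write path, so direct writes are silently
 * turned into buffered ones.
 */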
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iov_iter_rw(iter) == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}
const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};
static int nilfs_insert_inode_locked(struct inode *inode,
                                     struct nilfs_root *root,
                                     unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                        mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic64_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_after_creation;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
                err = -EIO;
                goto failed_after_creation;
        }

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_after_creation; /* never occur. When supporting
                                               nilfs_init_acl(), proper
                                               cancellation of above jobs
                                               should be considered */

        return inode;

 failed_after_creation:
        clear_nlink(inode);
        unlock_new_inode(inode);
        iput(inode); /* raw_inode will be deleted through
                        nilfs_evict_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode); /* if i_nlink == 1, generic_forget_inode() will be
                        called */
 failed:
        return ERR_PTR(err);
}
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
                        S_NOATIME | S_DIRSYNC);
}
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0)
                return -ESTALE; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        mapping_set_gfp_mask(inode->i_mapping,
                        mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}
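/*
 * GC inodes share the inode hash with regular inodes but have no root;
 * they are identified by the (ino, cno) pair instead, which is why the
 * comparison above also checks the checkpoint number for them.
 */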
static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}
void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When extending inode, nilfs->ns_inode_size should be checked
           for substitutions of appended fields */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        if (flags & I_DIRTY_DATASYNC)
                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: call with has_bmap = 0 is a workaround to avoid
                   deadlock of bmap. This delays update of i_bmap to just
                   before writing */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
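/*
 * Truncation of a large bmap proceeds in chunks of at most
 * NILFS_MAX_TRUNCATE_BLOCKS, relaxing segment-constructor pressure
 * between chunks via nilfs_relax_pressure_in_lock(), so that one huge
 * truncate cannot monopolize the log writer.
 */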
#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        __u64 b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}
void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But truncate has no return value. */
}
static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (nilfs_is_metadata_file_inode(inode))
                nilfs_mdt_clear(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}
void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        truncate_inode_pages_final(&inode->i_data);

        /* TODO: some of the following operations may fail. */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        clear_inode(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic64_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But delete_inode has no return value. */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                truncate_setsize(inode, iattr->ia_size);
                nilfs_truncate(inode);
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}
int nilfs_permission(struct inode *inode, int mask)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask);
}
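/*
 * nilfs_load_inode_block() caches the buffer head of the on-disk inode
 * in ii->i_bh.  The spinlock is dropped while the block is read in, so
 * the cache slot is re-checked after reacquiring ns_inode_lock (a
 * double-checked pattern); the caller always gets its own reference.
 */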
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}
int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(inode->i_sb, __func__,
                                      "cannot get inode (ino=%lu)",
                                      inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.");
                return err;
        }
        nilfs_update_inode(inode, ibh, flags);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        __nilfs_mark_inode_dirty(inode, flags);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}
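/*
 * nilfs_fiemap() below walks the bmap, merging physically contiguous
 * runs of blocks into extents, and reports uncommitted regions found
 * by nilfs_find_uncommitted_extent() as DELALLOC extents without a
 * physical address.
 */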
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        inode_lock(inode);

        isize = i_size_read(inode);
        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);
        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */
                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */
                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;
                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        inode_unlock(inode);
        return ret;
}