/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/module.h>
12 #include <linux/init.h>
14 #include <linux/statfs.h>
15 #include <linux/buffer_head.h>
16 #include <linux/backing-dev.h>
17 #include <linux/kthread.h>
18 #include <linux/parser.h>
19 #include <linux/mount.h>
20 #include <linux/seq_file.h>
21 #include <linux/proc_fs.h>
22 #include <linux/random.h>
23 #include <linux/exportfs.h>
24 #include <linux/blkdev.h>
25 #include <linux/f2fs_fs.h>
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/f2fs.h>
/* /proc/fs/f2fs directory; per-sb entries are created beneath it */
static struct proc_dir_entry *f2fs_proc_root;

/* slab cache backing struct f2fs_inode_info allocations */
static struct kmem_cache *f2fs_inode_cachep;
/*
 * Mount-option token values, matched against the f2fs_tokens table below.
 * NOTE(review): only two enumerators were visible in this excerpt; the rest
 * are reconstructed to mirror the match_table_t entries — confirm against
 * the upstream file.
 */
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_discard,
	Opt_noheap,
	Opt_nouser_xattr,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_err,
};
50 static match_table_t f2fs_tokens = {
51 {Opt_gc_background, "background_gc=%s"},
52 {Opt_disable_roll_forward, "disable_roll_forward"},
53 {Opt_discard, "discard"},
54 {Opt_noheap, "no_heap"},
55 {Opt_nouser_xattr, "nouser_xattr"},
57 {Opt_active_logs, "active_logs=%u"},
58 {Opt_disable_ext_identify, "disable_ext_identify"},
62 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
70 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
74 static void init_once(void *foo)
76 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
78 inode_init_once(&fi->vfs_inode);
81 static int parse_options(struct super_block *sb, char *options)
83 struct f2fs_sb_info *sbi = F2FS_SB(sb);
84 substring_t args[MAX_OPT_ARGS];
91 while ((p = strsep(&options, ",")) != NULL) {
96 * Initialize args struct so we know whether arg was
97 * found; some options take optional arguments.
99 args[0].to = args[0].from = NULL;
100 token = match_token(p, f2fs_tokens, args);
103 case Opt_gc_background:
104 name = match_strdup(&args[0]);
108 if (!strncmp(name, "on", 2))
110 else if (!strncmp(name, "off", 3))
111 clear_opt(sbi, BG_GC);
118 case Opt_disable_roll_forward:
119 set_opt(sbi, DISABLE_ROLL_FORWARD);
122 set_opt(sbi, DISCARD);
125 set_opt(sbi, NOHEAP);
127 #ifdef CONFIG_F2FS_FS_XATTR
128 case Opt_nouser_xattr:
129 clear_opt(sbi, XATTR_USER);
132 case Opt_nouser_xattr:
133 f2fs_msg(sb, KERN_INFO,
134 "nouser_xattr options not supported");
137 #ifdef CONFIG_F2FS_FS_POSIX_ACL
139 clear_opt(sbi, POSIX_ACL);
143 f2fs_msg(sb, KERN_INFO, "noacl options not supported");
146 case Opt_active_logs:
147 if (args->from && match_int(args, &arg))
149 if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
151 sbi->active_logs = arg;
153 case Opt_disable_ext_identify:
154 set_opt(sbi, DISABLE_EXT_IDENTIFY);
157 f2fs_msg(sb, KERN_ERR,
158 "Unrecognized mount option \"%s\" or missing value",
166 static struct inode *f2fs_alloc_inode(struct super_block *sb)
168 struct f2fs_inode_info *fi;
170 fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
174 init_once((void *) fi);
176 /* Initialize f2fs-specific inode info */
177 fi->vfs_inode.i_version = 1;
178 atomic_set(&fi->dirty_dents, 0);
179 fi->i_current_depth = 1;
181 rwlock_init(&fi->ext.ext_lock);
183 set_inode_flag(fi, FI_NEW_INODE);
185 return &fi->vfs_inode;
188 static int f2fs_drop_inode(struct inode *inode)
191 * This is to avoid a deadlock condition like below.
192 * writeback_single_inode(inode)
193 * - f2fs_write_data_page
194 * - f2fs_gc -> iput -> evict
195 * - inode_wait_for_writeback(inode)
197 if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
199 return generic_drop_inode(inode);
203 * f2fs_dirty_inode() is called from __mark_inode_dirty()
205 * We should call set_dirty_inode to write the dirty inode through write_inode.
207 static void f2fs_dirty_inode(struct inode *inode, int flags)
209 set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
213 static void f2fs_i_callback(struct rcu_head *head)
215 struct inode *inode = container_of(head, struct inode, i_rcu);
216 kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
219 static void f2fs_destroy_inode(struct inode *inode)
221 call_rcu(&inode->i_rcu, f2fs_i_callback);
224 static void f2fs_put_super(struct super_block *sb)
226 struct f2fs_sb_info *sbi = F2FS_SB(sb);
229 remove_proc_entry("segment_info", sbi->s_proc);
230 remove_proc_entry(sb->s_id, f2fs_proc_root);
233 f2fs_destroy_stats(sbi);
236 write_checkpoint(sbi, true);
238 iput(sbi->node_inode);
239 iput(sbi->meta_inode);
241 /* destroy f2fs internal modules */
242 destroy_node_manager(sbi);
243 destroy_segment_manager(sbi);
247 sb->s_fs_info = NULL;
248 brelse(sbi->raw_super_buf);
252 int f2fs_sync_fs(struct super_block *sb, int sync)
254 struct f2fs_sb_info *sbi = F2FS_SB(sb);
256 trace_f2fs_sync_fs(sb, sync);
258 if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
262 mutex_lock(&sbi->gc_mutex);
263 write_checkpoint(sbi, false);
264 mutex_unlock(&sbi->gc_mutex);
266 f2fs_balance_fs(sbi);
/*
 * f2fs_freeze - super_operations.freeze_fs callback.
 * Nothing to do for a read-only mount; otherwise force a synchronous
 * checkpoint so the on-disk state is consistent while frozen.
 */
static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}
283 static int f2fs_unfreeze(struct super_block *sb)
288 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
290 struct super_block *sb = dentry->d_sb;
291 struct f2fs_sb_info *sbi = F2FS_SB(sb);
292 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
293 block_t total_count, user_block_count, start_count, ovp_count;
295 total_count = le64_to_cpu(sbi->raw_super->block_count);
296 user_block_count = sbi->user_block_count;
297 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
298 ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
299 buf->f_type = F2FS_SUPER_MAGIC;
300 buf->f_bsize = sbi->blocksize;
302 buf->f_blocks = total_count - start_count;
303 buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
304 buf->f_bavail = user_block_count - valid_user_blocks(sbi);
306 buf->f_files = sbi->total_node_count;
307 buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
309 buf->f_namelen = F2FS_NAME_LEN;
310 buf->f_fsid.val[0] = (u32)id;
311 buf->f_fsid.val[1] = (u32)(id >> 32);
316 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
318 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
320 if (!(root->d_sb->s_flags & MS_RDONLY) && test_opt(sbi, BG_GC))
321 seq_printf(seq, ",background_gc=%s", "on");
323 seq_printf(seq, ",background_gc=%s", "off");
324 if (test_opt(sbi, DISABLE_ROLL_FORWARD))
325 seq_puts(seq, ",disable_roll_forward");
326 if (test_opt(sbi, DISCARD))
327 seq_puts(seq, ",discard");
328 if (test_opt(sbi, NOHEAP))
329 seq_puts(seq, ",no_heap_alloc");
330 #ifdef CONFIG_F2FS_FS_XATTR
331 if (test_opt(sbi, XATTR_USER))
332 seq_puts(seq, ",user_xattr");
334 seq_puts(seq, ",nouser_xattr");
336 #ifdef CONFIG_F2FS_FS_POSIX_ACL
337 if (test_opt(sbi, POSIX_ACL))
338 seq_puts(seq, ",acl");
340 seq_puts(seq, ",noacl");
342 if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
343 seq_puts(seq, ",disable_ext_identify");
345 seq_printf(seq, ",active_logs=%u", sbi->active_logs);
350 static int segment_info_seq_show(struct seq_file *seq, void *offset)
352 struct super_block *sb = seq->private;
353 struct f2fs_sb_info *sbi = F2FS_SB(sb);
354 unsigned int total_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
357 for (i = 0; i < total_segs; i++) {
358 seq_printf(seq, "%u", get_valid_blocks(sbi, i, 1));
359 if (i != 0 && (i % 10) == 0)
367 static int segment_info_open_fs(struct inode *inode, struct file *file)
369 return single_open(file, segment_info_seq_show, PDE_DATA(inode));
372 static const struct file_operations f2fs_seq_segment_info_fops = {
373 .owner = THIS_MODULE,
374 .open = segment_info_open_fs,
377 .release = single_release,
380 static int f2fs_remount(struct super_block *sb, int *flags, char *data)
382 struct f2fs_sb_info *sbi = F2FS_SB(sb);
383 struct f2fs_mount_info org_mount_opt;
384 int err, active_logs;
387 * Save the old mount options in case we
388 * need to restore them.
390 org_mount_opt = sbi->mount_opt;
391 active_logs = sbi->active_logs;
393 /* parse mount options */
394 err = parse_options(sb, data);
399 * Previous and new state of filesystem is RO,
400 * so no point in checking GC conditions.
402 if ((sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
406 * We stop the GC thread if FS is mounted as RO
407 * or if background_gc = off is passed in mount
408 * option. Also sync the filesystem.
410 if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
411 if (sbi->gc_thread) {
415 } else if (test_opt(sbi, BG_GC) && !sbi->gc_thread) {
416 err = start_gc_thread(sbi);
421 /* Update the POSIXACL Flag */
422 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
423 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
427 sbi->mount_opt = org_mount_opt;
428 sbi->active_logs = active_logs;
432 static struct super_operations f2fs_sops = {
433 .alloc_inode = f2fs_alloc_inode,
434 .drop_inode = f2fs_drop_inode,
435 .destroy_inode = f2fs_destroy_inode,
436 .write_inode = f2fs_write_inode,
437 .dirty_inode = f2fs_dirty_inode,
438 .show_options = f2fs_show_options,
439 .evict_inode = f2fs_evict_inode,
440 .put_super = f2fs_put_super,
441 .sync_fs = f2fs_sync_fs,
442 .freeze_fs = f2fs_freeze,
443 .unfreeze_fs = f2fs_unfreeze,
444 .statfs = f2fs_statfs,
445 .remount_fs = f2fs_remount,
448 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
449 u64 ino, u32 generation)
451 struct f2fs_sb_info *sbi = F2FS_SB(sb);
454 if (ino < F2FS_ROOT_INO(sbi))
455 return ERR_PTR(-ESTALE);
458 * f2fs_iget isn't quite right if the inode is currently unallocated!
459 * However f2fs_iget currently does appropriate checks to handle stale
460 * inodes so everything is OK.
462 inode = f2fs_iget(sb, ino);
464 return ERR_CAST(inode);
465 if (generation && inode->i_generation != generation) {
466 /* we didn't find the right inode.. */
468 return ERR_PTR(-ESTALE);
473 static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
474 int fh_len, int fh_type)
476 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
480 static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
481 int fh_len, int fh_type)
483 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
487 static const struct export_operations f2fs_export_ops = {
488 .fh_to_dentry = f2fs_fh_to_dentry,
489 .fh_to_parent = f2fs_fh_to_parent,
490 .get_parent = f2fs_get_parent,
493 static loff_t max_file_size(unsigned bits)
495 loff_t result = ADDRS_PER_INODE;
496 loff_t leaf_count = ADDRS_PER_BLOCK;
498 /* two direct node blocks */
499 result += (leaf_count * 2);
501 /* two indirect node blocks */
502 leaf_count *= NIDS_PER_BLOCK;
503 result += (leaf_count * 2);
505 /* one double indirect node block */
506 leaf_count *= NIDS_PER_BLOCK;
507 result += leaf_count;
513 static int sanity_check_raw_super(struct super_block *sb,
514 struct f2fs_super_block *raw_super)
516 unsigned int blocksize;
518 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
519 f2fs_msg(sb, KERN_INFO,
520 "Magic Mismatch, valid(0x%x) - read(0x%x)",
521 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
525 /* Currently, support only 4KB page cache size */
526 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
527 f2fs_msg(sb, KERN_INFO,
528 "Invalid page_cache_size (%lu), supports only 4KB\n",
533 /* Currently, support only 4KB block size */
534 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
535 if (blocksize != F2FS_BLKSIZE) {
536 f2fs_msg(sb, KERN_INFO,
537 "Invalid blocksize (%u), supports only 4KB\n",
542 if (le32_to_cpu(raw_super->log_sectorsize) !=
543 F2FS_LOG_SECTOR_SIZE) {
544 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
547 if (le32_to_cpu(raw_super->log_sectors_per_block) !=
548 F2FS_LOG_SECTORS_PER_BLOCK) {
549 f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
555 static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
557 unsigned int total, fsmeta;
558 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
559 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
561 total = le32_to_cpu(raw_super->segment_count);
562 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
563 fsmeta += le32_to_cpu(raw_super->segment_count_sit);
564 fsmeta += le32_to_cpu(raw_super->segment_count_nat);
565 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
566 fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
571 if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
572 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
578 static void init_sb_info(struct f2fs_sb_info *sbi)
580 struct f2fs_super_block *raw_super = sbi->raw_super;
583 sbi->log_sectors_per_block =
584 le32_to_cpu(raw_super->log_sectors_per_block);
585 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
586 sbi->blocksize = 1 << sbi->log_blocksize;
587 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
588 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
589 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
590 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
591 sbi->total_sections = le32_to_cpu(raw_super->section_count);
592 sbi->total_node_count =
593 (le32_to_cpu(raw_super->segment_count_nat) / 2)
594 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
595 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
596 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
597 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
598 sbi->cur_victim_sec = NULL_SECNO;
600 for (i = 0; i < NR_COUNT_TYPE; i++)
601 atomic_set(&sbi->nr_pages[i], 0);
604 static int validate_superblock(struct super_block *sb,
605 struct f2fs_super_block **raw_super,
606 struct buffer_head **raw_super_buf, sector_t block)
608 const char *super = (block == 0 ? "first" : "second");
610 /* read f2fs raw super block */
611 *raw_super_buf = sb_bread(sb, block);
612 if (!*raw_super_buf) {
613 f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
618 *raw_super = (struct f2fs_super_block *)
619 ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
621 /* sanity checking of raw super */
622 if (!sanity_check_raw_super(sb, *raw_super))
625 f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
626 "in %s superblock", super);
630 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
632 struct f2fs_sb_info *sbi;
633 struct f2fs_super_block *raw_super;
634 struct buffer_head *raw_super_buf;
639 /* allocate memory for f2fs-specific super block info */
640 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
644 /* set a block size */
645 if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
646 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
650 err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
652 brelse(raw_super_buf);
653 /* check secondary superblock when primary failed */
654 err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
659 /* init some FS parameters */
660 sbi->active_logs = NR_CURSEG_TYPE;
664 #ifdef CONFIG_F2FS_FS_XATTR
665 set_opt(sbi, XATTR_USER);
667 #ifdef CONFIG_F2FS_FS_POSIX_ACL
668 set_opt(sbi, POSIX_ACL);
670 /* parse mount options */
671 err = parse_options(sb, (char *)data);
675 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
676 sb->s_max_links = F2FS_LINK_MAX;
677 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
679 sb->s_op = &f2fs_sops;
680 sb->s_xattr = f2fs_xattr_handlers;
681 sb->s_export_op = &f2fs_export_ops;
682 sb->s_magic = F2FS_SUPER_MAGIC;
684 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
685 (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
686 memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
688 /* init f2fs-specific super block info */
690 sbi->raw_super = raw_super;
691 sbi->raw_super_buf = raw_super_buf;
692 mutex_init(&sbi->gc_mutex);
693 mutex_init(&sbi->writepages);
694 mutex_init(&sbi->cp_mutex);
695 for (i = 0; i < NR_GLOBAL_LOCKS; i++)
696 mutex_init(&sbi->fs_lock[i]);
697 mutex_init(&sbi->node_write);
699 spin_lock_init(&sbi->stat_lock);
700 init_rwsem(&sbi->bio_sem);
703 /* get an inode for meta space */
704 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
705 if (IS_ERR(sbi->meta_inode)) {
706 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
707 err = PTR_ERR(sbi->meta_inode);
711 err = get_valid_checkpoint(sbi);
713 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
714 goto free_meta_inode;
717 /* sanity checking of checkpoint */
719 if (sanity_check_ckpt(sbi)) {
720 f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
724 sbi->total_valid_node_count =
725 le32_to_cpu(sbi->ckpt->valid_node_count);
726 sbi->total_valid_inode_count =
727 le32_to_cpu(sbi->ckpt->valid_inode_count);
728 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
729 sbi->total_valid_block_count =
730 le64_to_cpu(sbi->ckpt->valid_block_count);
731 sbi->last_valid_block_count = sbi->total_valid_block_count;
732 sbi->alloc_valid_block_count = 0;
733 INIT_LIST_HEAD(&sbi->dir_inode_list);
734 spin_lock_init(&sbi->dir_inode_lock);
736 init_orphan_info(sbi);
738 /* setup f2fs internal modules */
739 err = build_segment_manager(sbi);
741 f2fs_msg(sb, KERN_ERR,
742 "Failed to initialize F2FS segment manager");
745 err = build_node_manager(sbi);
747 f2fs_msg(sb, KERN_ERR,
748 "Failed to initialize F2FS node manager");
752 build_gc_manager(sbi);
754 /* get an inode for node space */
755 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
756 if (IS_ERR(sbi->node_inode)) {
757 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
758 err = PTR_ERR(sbi->node_inode);
762 /* if there are nt orphan nodes free them */
764 if (recover_orphan_inodes(sbi))
765 goto free_node_inode;
767 /* read root inode and dentry */
768 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
770 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
772 goto free_node_inode;
774 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
775 goto free_root_inode;
777 sb->s_root = d_make_root(root); /* allocate root dentry */
780 goto free_root_inode;
783 /* recover fsynced data */
784 if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
785 err = recover_fsync_data(sbi);
787 f2fs_msg(sb, KERN_ERR,
788 "Cannot recover all fsync data errno=%ld", err);
792 * If filesystem is not mounted as read-only then
793 * do start the gc_thread.
795 if (!(sb->s_flags & MS_RDONLY)) {
796 /* After POR, we can run background GC thread.*/
797 err = start_gc_thread(sbi);
802 err = f2fs_build_stats(sbi);
807 sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
810 proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
811 &f2fs_seq_segment_info_fops, sb);
813 if (test_opt(sbi, DISCARD)) {
814 struct request_queue *q = bdev_get_queue(sb->s_bdev);
815 if (!blk_queue_discard(q))
816 f2fs_msg(sb, KERN_WARNING,
817 "mounting with \"discard\" option, but "
818 "the device does not support discard");
828 iput(sbi->node_inode);
830 destroy_node_manager(sbi);
832 destroy_segment_manager(sbi);
836 make_bad_inode(sbi->meta_inode);
837 iput(sbi->meta_inode);
839 brelse(raw_super_buf);
845 static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
846 const char *dev_name, void *data)
848 return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
851 static struct file_system_type f2fs_fs_type = {
852 .owner = THIS_MODULE,
855 .kill_sb = kill_block_super,
856 .fs_flags = FS_REQUIRES_DEV,
858 MODULE_ALIAS_FS("f2fs");
860 static int __init init_inodecache(void)
862 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
863 sizeof(struct f2fs_inode_info), NULL);
864 if (f2fs_inode_cachep == NULL)
869 static void destroy_inodecache(void)
872 * Make sure all delayed rcu free inodes are flushed before we
876 kmem_cache_destroy(f2fs_inode_cachep);
879 static int __init init_f2fs_fs(void)
883 err = init_inodecache();
886 err = create_node_manager_caches();
889 err = create_gc_caches();
892 err = create_checkpoint_caches();
895 err = register_filesystem(&f2fs_fs_type);
898 f2fs_create_root_stats();
899 f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
904 static void __exit exit_f2fs_fs(void)
906 remove_proc_entry("fs/f2fs", NULL);
907 f2fs_destroy_root_stats();
908 unregister_filesystem(&f2fs_fs_type);
909 destroy_checkpoint_caches();
911 destroy_node_manager_caches();
912 destroy_inodecache();
915 module_init(init_f2fs_fs)
916 module_exit(exit_f2fs_fs)
918 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
919 MODULE_DESCRIPTION("Flash Friendly File System");
920 MODULE_LICENSE("GPL");