/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
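/*
 * Note: the table above maps each mount option string to the token that is
 * consumed by parse_options() below; options taking a value use the %s/%u
 * match patterns.
 */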
/* Sysfs support for f2fs */
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */

	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
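/*
 * lifetime_write_kbytes (read-only sysfs attribute): kilobytes written to the
 * device over the filesystem's lifetime, i.e. the value accumulated in the
 * checkpoint (kbytes_written) plus what the block layer reports as written
 * since this mount (BD_PART_WRITTEN).
 */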
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
				BD_PART_WRITTEN(sbi)));
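/*
 * Generic show/store used by the F2FS_RW_ATTR() tunables below: locate the
 * backing structure via __struct_ptr() and then read or update the unsigned
 * int that lives at the attribute's recorded byte offset.
 */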
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
	unsigned char *ptr = NULL;

	ptr = __struct_ptr(sbi, a->struct_type);

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);

static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
	ptr = __struct_ptr(sbi, a->struct_type);

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;

static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;

static void f2fs_sb_release(struct kobject *kobj)
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
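/*
 * The macros below stamp out one struct f2fs_attr per tunable.  F2FS_RW_ATTR
 * records which structure a field lives in (struct_type) and its byte offset,
 * so the generic f2fs_sbi_show/f2fs_sbi_store above can reach it without any
 * per-attribute code.
 */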
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {		\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.struct_type = _struct_type,

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);

#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),

static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
	struct va_format vaf;

	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);

static void init_once(void *foo)
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
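/*
 * parse_options() walks the comma-separated mount option string with
 * strsep()/match_token() and sets or clears the corresponding flags in
 * sbi->mount_opt; an unrecognized option or a bad value fails the mount.
 */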
static int parse_options(struct super_block *sb, char *options)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, FORCE_FG_GC);
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			set_opt(sbi, NOHEAP);
#ifdef CONFIG_F2FS_FS_XATTR
			set_opt(sbi, XATTR_USER);
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
			set_opt(sbi, POSIX_ACL);
			clear_opt(sbi, POSIX_ACL);
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
			sbi->active_logs = arg;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			set_opt(sbi, NOBARRIER);
			set_opt(sbi, FASTBOOT);
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			set_opt(sbi, DATA_FLUSH);
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
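/*
 * f2fs_alloc_inode() hands out a struct f2fs_inode_info from the dedicated
 * slab and initializes the f2fs-private fields (dirty page counter, in-memory
 * page list, i_sem, directory hash level) before the VFS sees the inode.
 */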
static struct inode *f2fs_alloc_inode(struct super_block *sb)
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	fi->i_crypt_info = NULL;

	return &fi->vfs_inode;
static int f2fs_drop_inode(struct inode *inode)
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode being called simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				commit_inmem_pages(inode, true);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
			if (F2FS_I(inode)->i_crypt_info)
				f2fs_free_encryption_info(inode,
					F2FS_I(inode)->i_crypt_info);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);

	return generic_drop_inode(inode);
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);

static void f2fs_i_callback(struct rcu_head *head)
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));

static void f2fs_destroy_inode(struct inode *inode)
	call_rcu(&inode->i_rcu, f2fs_i_callback);
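/*
 * Unmount path: remove the proc and sysfs entries, stop the shrinker from
 * touching this instance, write a final clean checkpoint if the filesystem
 * is dirty, flush any merged bios, and tear down the node and segment
 * managers before freeing the raw superblock.
 */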
static void f2fs_put_super(struct super_block *sb)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	remove_proc_entry("segment_info", sbi->s_proc);
	remove_proc_entry(sb->s_id, f2fs_proc_root);

	kobject_del(&sbi->s_kobj);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
		write_checkpoint(sbi, &cpc);

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this here.
	 * In addition, the EIO case skips the checkpoint, so we need this
	 * as well.
	 */
	release_ino_entry(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any remaining writeback pages */
	if (get_pages(sbi, F2FS_WRITEBACK)) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
		f2fs_submit_merged_bio(sbi, META, WRITE);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	kfree(sbi->raw_super);
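/*
 * sync_fs: when sync is requested, write a checkpoint under gc_mutex; the
 * checkpoint reason is derived from the current state via __get_cp_reason().
 */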
int f2fs_sync_fs(struct super_block *sb, int sync)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	f2fs_trace_ios(NULL, 1);

static int f2fs_freeze(struct super_block *sb)
	if (f2fs_readonly(sb))

	err = f2fs_sync_fs(sb, 1);

static int f2fs_unfreeze(struct super_block *sb)
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
			seq_printf(seq, ",background_gc=%s", "on");
		seq_printf(seq, ",background_gc=%s", "off");
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
		seq_puts(seq, ",noacl");
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
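/*
 * /proc/fs/f2fs/<dev>/segment_info: dump, for every segment in the main
 * area, its current type and valid block count, ten entries per line.
 */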
static int segment_info_seq_show(struct seq_file *seq, void *offset)
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))

static int segment_info_open_fs(struct inode *inode, struct file *file)
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));

static const struct file_operations f2fs_seq_segment_info_fops = {
	.owner = THIS_MODULE,
	.open = segment_info_open_fs,
	.release = single_release,
static void default_options(struct f2fs_sb_info *sbi)
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
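/*
 * Remount: snapshot the current mount options, re-apply the defaults and
 * re-parse the new option string, then start or stop the GC thread and the
 * flush-merge thread to match the new RO/RW state.  Toggling extent_cache
 * on a live mount is rejected, and any failure restores the saved options.
 */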
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	if (*flags & MS_RDONLY) {
		set_opt(sbi, FASTBOOT);
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);

	/*
	 * Both the previous and the new state of the filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"switch extent_cache option is not allowed");

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			need_restart_gc = true;
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);

	/*
	 * We stop the issue_flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);

	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {

	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
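/*
 * NFS export support: file handles carry the inode number and generation.
 * Handles that point at an out-of-range nid or a mismatching generation are
 * rejected with -ESTALE.
 */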
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		return ERR_PTR(-ESTALE);

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
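/*
 * max_file_blocks() below sums the data blocks addressable from one inode:
 * the direct pointers left in the inode after the inline-xattr reservation,
 * plus two direct node blocks, two indirect node blocks and one double
 * indirect node block.  With the usual 4KB-block constants (923 inode
 * addresses minus the inline-xattr reservation, roughly 1018 addresses or
 * nids per node block) this works out to about a billion blocks, which is
 * where the ~3.94TB maximum file size quoted in the f2fs documentation
 * comes from.
 */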
static loff_t max_file_blocks(void)
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;
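/*
 * sanity_check_area_boundary() verifies that the metadata areas recorded in
 * the superblock (CP, SIT, NAT, SSA) and the main area are laid out back to
 * back: each area must end exactly where the next one begins, and the main
 * area must end at the last block covered by segment_count.
 */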
static inline bool sanity_check_area_boundary(struct super_block *sb,
					struct f2fs_super_block *raw_super)
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);

	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
			main_blkaddr,
			segment0_blkaddr + (segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));

	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sb, raw_super))
static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
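/*
 * init_sb_info() caches frequently used geometry from the raw superblock
 * (block and segment sizes, sections/zones, reserved inode numbers) in
 * sbi and initializes the bookkeeping counters and timers.
 */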
static void init_sb_info(struct f2fs_sb_info *sbi)
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
/*
 * Read the f2fs raw super block.
 * Because we keep two copies of the super block, read the first one first;
 * if it is invalid, move on and read the second one.
 */
static int read_raw_super_block(struct super_block *sb,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
	struct buffer_head *bh;
	struct f2fs_super_block *super, *buf;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);

	bh = sb_bread(sb, block);
		f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",

	buf = (struct f2fs_super_block *)(bh->b_data + F2FS_SUPER_OFFSET);

	/* sanity checking of raw super */
	if (sanity_check_raw_super(sb, buf)) {
		f2fs_msg(sb, KERN_ERR,
			"Can't find valid F2FS filesystem in %dth superblock",

	memcpy(super, buf, sizeof(*super));
	*valid_super_block = block;

	/* check the validity of the second superblock */

	/* No valid superblock */
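/*
 * __f2fs_commit_super() copies the in-memory superblock into the requested
 * block's buffer and writes it out synchronously with FUA, so that the two
 * on-disk copies can be kept in sync (see f2fs_commit_super() below).
 */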
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
	struct buffer_head *bh;

	bh = sb_getblk(sbi->sb, block);

	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);

	/* it's a rare case, so we can do FUA all the time */
	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);

int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
	/* write the backup superblock first */
	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);

	/* if we are in the recovery path, skip writing the valid superblock */

	/* write the current valid superblock */
	return __f2fs_commit_super(sbi, sbi->valid_super_block);
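/*
 * f2fs_fill_super() is the mount workhorse: read and validate the raw
 * superblock, parse options, load the checkpoint, build the segment and
 * node managers, recover orphan inodes and fsynced data if needed, and
 * finally start the background GC thread.  Error paths unwind in reverse
 * order, and a failed recovery retries the mount once more with fsck
 * flagged as needed.
 */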
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

	valid_super_block = -1;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {

	err = parse_options(sb, options);

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);

	err = get_valid_checkpoint(sbi);
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;

	/* sanity checking of checkpoint */
	if (sanity_check_ckpt(sbi)) {
		f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
	err = build_node_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if they exist */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);

	f2fs_join_shrinker(sbi);

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		goto free_node_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
		goto free_root_inode;

	err = f2fs_build_stats(sbi);
		goto free_root_inode;

	sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
			!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {

			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread. */
		err = start_gc_thread(sbi);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
		f2fs_commit_super(sbi, true);

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	f2fs_destroy_stats(sbi);
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);

	/* give only one more chance */
		shrink_dcache_sb(sb);

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);

static void kill_f2fs_super(struct super_block *sb)
	set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,

MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)

static void destroy_inodecache(void)
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	kmem_cache_destroy(f2fs_inode_cachep);
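/*
 * Module init registers everything in dependency order: slab caches first,
 * then the node/segment/checkpoint/extent caches, the sysfs kset, crypto,
 * the shrinker, the filesystem type, and finally debugfs stats and the
 * procfs root.  Each error path below unwinds whatever was already set up,
 * and exit_f2fs_fs() tears it all down in reverse.
 */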
static int __init init_f2fs_fs(void)
	f2fs_build_trace_ios();

	err = init_inodecache();
	err = create_node_manager_caches();
		goto free_inodecache;
	err = create_segment_manager_caches();
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
		goto free_segment_manager_caches;
	err = create_extent_cache();
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
		goto free_extent_cache;
	err = f2fs_init_crypto();
	err = register_shrinker(&f2fs_shrinker_info);
	err = register_filesystem(&f2fs_fs_type);
	err = f2fs_create_root_stats();
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);

	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
	destroy_inodecache();
static void __exit exit_f2fs_fs(void)
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");