/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
static struct kmem_cache *fsync_entry_slab;
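
/*
 * There is room to roll forward only if the blocks allocated since the
 * last checkpoint still fit under the total user block count.
 */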
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
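
/* Look up the fsync_inode_entry for @ino in @head, or return NULL. */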
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
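
/*
 * Allocate an fsync_inode_entry for @inode and append it to @head.
 * Returns NULL if the slab allocation fails.
 */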
static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
							struct inode *inode)
{
	struct fsync_inode_entry *entry;

	entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	if (!entry)
		return NULL;

	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
}
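
/* Unlink @entry from its list and drop the inode reference it holds. */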
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
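
/*
 * Re-link the recovered inode into its parent directory. The parent is
 * cached in @dir_list across calls; a stale entry holding the same name
 * but a different ino is unlinked through the orphan mechanism before
 * the correct link is re-added.
 */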
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}

		entry = add_fsync_inode(dir_list, dir);
		if (!entry) {
			err = -ENOMEM;
			iput(dir);
			goto out;
		}
	}

	dir = entry->inode;

	if (file_enc_name(inode))
		return 0;

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = __f2fs_add_link(dir, &name, inode,
					inode->i_ino, inode->i_mode);
	}
	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
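
/*
 * Copy mode, size, and timestamps from the fsync'ed node page back into
 * the in-memory inode (scenario 1 above).
 */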
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}
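
/*
 * Reject a node page that is older than the cached inode: if any
 * in-memory timestamp is newer than the on-disk one, the page is stale.
 */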
static bool is_same_inode(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *ri = F2FS_INODE(ipage);
	struct timespec disk;

	if (!IS_INODE(ipage))
		return true;

	disk.tv_sec = le64_to_cpu(ri->i_ctime);
	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	if (timespec_compare(&inode->i_ctime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_atime);
	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	if (timespec_compare(&inode->i_atime, &disk) > 0)
		return false;

	disk.tv_sec = le64_to_cpu(ri->i_mtime);
	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	if (timespec_compare(&inode->i_mtime, &disk) > 0)
		return false;

	return true;
}
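
/*
 * Step #1 of recovery: walk the warm node chain written after the last
 * checkpoint and collect every inode that owns an fsync-marked dnode
 * into @head.
 */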
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (!is_same_inode(entry->inode, page))
				goto next;
		} else {
			struct inode *inode;

			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(inode)) {
				err = PTR_ERR(inode);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}

			/* add this fsync inode to the list */
			entry = add_fsync_inode(head, inode);
			if (!entry) {
				err = -ENOMEM;
				iput(inode);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}
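
/*
 * @blkaddr is about to be reused by recovered data. Use the segment
 * summary to find the node that still references that block and
 * truncate the stale index, so the block is not owned twice.
 */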
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
				block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
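
/*
 * Replay one fsync'ed node page for @inode: restore xattrs and inline
 * data, then make the on-disk dnode carry the same block addresses as
 * the logged page, invalidating or reserving blocks as needed.
 */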
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
				struct page *page, block_t blkaddr)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold log.
		 * But we should retain this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if ((loff_t)(start + 1) << PAGE_SHIFT > i_size_read(inode))
			f2fs_i_size_write(inode,
					(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
				while (err)
					err = reserve_new_block(&dn);
#endif
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}
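
/*
 * Step #2 of recovery: walk the warm node chain again and, for each
 * collected inode, replay inode updates, dentry operations, and data
 * block addresses.
 */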
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_tmp_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}
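
/*
 * Entry point for roll forward recovery at mount time. When @check_only
 * is set, only report (by returning 1) whether there is data to
 * recover; otherwise recover it and write a checkpoint.
 */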
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	struct list_head dir_list;
	block_t blkaddr;
	int err;
	int ret = 0;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err || list_empty(&inode_list))
		goto out;

	if (check_only) {
		ret = 1;
		goto out;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (test_opt(sbi, LFS)) {
			update_meta_page(sbi, NULL, blkaddr);
			invalidate = true;
		} else if (discard_next_dnode(sbi, blkaddr)) {
			invalidate = true;
		}

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		err = write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}

	destroy_fsync_dnodes(&dir_list);
	kmem_cache_destroy(fsync_entry_slab);
	return ret ? ret : err;
}