/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC, or end up doing a checkpoint, if there are too
	 * many dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}
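/*
 * Mark @segno dirty in the bitmap for @dirty_type. When @dirty_type is
 * DIRTY, the segment is also tracked in the per-temperature dirty bitmap
 * matching its seg_entry type, and any stale temperature bit is cleared.
 */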
38 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
39 enum dirty_type dirty_type)
41 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	/* a current segment need not be added to the dirty list */
	if (IS_CURSEG(sbi, segno))
		return;
47 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
48 dirty_i->nr_dirty[dirty_type]++;
50 if (dirty_type == DIRTY) {
51 struct seg_entry *sentry = get_seg_entry(sbi, segno);
52 enum dirty_type t = DIRTY_HOT_DATA;
54 dirty_type = sentry->type;
56 if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
57 dirty_i->nr_dirty[dirty_type]++;
59 /* Only one bitmap should be set */
60 for (; t <= DIRTY_COLD_NODE; t++) {
63 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
64 dirty_i->nr_dirty[t]--;
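/*
 * Clear @segno from the bitmap for @dirty_type. For DIRTY, all the
 * per-temperature bitmaps are cleared as well, and a section with no
 * remaining valid blocks is dropped from the victim candidates.
 */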
69 static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
70 enum dirty_type dirty_type)
72 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
74 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
75 dirty_i->nr_dirty[dirty_type]--;
77 if (dirty_type == DIRTY) {
78 enum dirty_type t = DIRTY_HOT_DATA;
80 /* clear all the bitmaps */
81 for (; t <= DIRTY_COLD_NODE; t++)
82 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
83 dirty_i->nr_dirty[t]--;
85 if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
86 clear_bit(GET_SECNO(sbi, segno),
87 dirty_i->victim_secmap);
/*
 * This should not fail with an error such as -ENOMEM, since adding a dirty
 * entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be
 * added.
 */
96 void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
98 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
99 unsigned short valid_blocks;
101 if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
104 mutex_lock(&dirty_i->seglist_lock);
106 valid_blocks = get_valid_blocks(sbi, segno, 0);
108 if (valid_blocks == 0) {
109 __locate_dirty_segment(sbi, segno, PRE);
110 __remove_dirty_segment(sbi, segno, DIRTY);
111 } else if (valid_blocks < sbi->blocks_per_seg) {
112 __locate_dirty_segment(sbi, segno, DIRTY);
114 /* Recovery routine with SSR needs this */
115 __remove_dirty_segment(sbi, segno, DIRTY);
118 mutex_unlock(&dirty_i->seglist_lock);
/*
 * clear_prefree_segments() should be called after the checkpoint is done.
 */
125 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
127 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
128 unsigned int segno, offset = 0;
129 unsigned int total_segs = TOTAL_SEGS(sbi);
131 mutex_lock(&dirty_i->seglist_lock);
133 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
135 if (segno >= total_segs)
137 __set_test_and_free(sbi, segno);
140 mutex_unlock(&dirty_i->seglist_lock);
143 void clear_prefree_segments(struct f2fs_sb_info *sbi)
145 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
146 unsigned int segno, offset = 0;
147 unsigned int total_segs = TOTAL_SEGS(sbi);
149 mutex_lock(&dirty_i->seglist_lock);
151 segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
153 if (segno >= total_segs)
157 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
158 dirty_i->nr_dirty[PRE]--;
161 if (test_opt(sbi, DISCARD))
162 blkdev_issue_discard(sbi->sb->s_bdev,
163 START_BLOCK(sbi, segno) <<
164 sbi->log_sectors_per_block,
165 1 << (sbi->log_sectors_per_block +
166 sbi->log_blocks_per_seg),
169 mutex_unlock(&dirty_i->seglist_lock);
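/*
 * Mark the SIT entry of @segno dirty so that it gets written back at the
 * next checkpoint.
 */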
172 static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
174 struct sit_info *sit_i = SIT_I(sbi);
175 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
176 sit_i->dirty_sentries++;
179 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
180 unsigned int segno, int modified)
182 struct seg_entry *se = get_seg_entry(sbi, segno);
185 __mark_sit_entry_dirty(sbi, segno);
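/*
 * Update the SIT entry covering @blkaddr: @del is +1 when the block becomes
 * valid and -1 when it is invalidated. This refreshes the valid block count,
 * the valid block bitmaps, and the segment modification time.
 */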
188 static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
190 struct seg_entry *se;
191 unsigned int segno, offset;
192 long int new_vblocks;
194 segno = GET_SEGNO(sbi, blkaddr);
196 se = get_seg_entry(sbi, segno);
197 new_vblocks = se->valid_blocks + del;
198 offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
200 BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
201 (new_vblocks > sbi->blocks_per_seg)));
203 se->valid_blocks = new_vblocks;
204 se->mtime = get_mtime(sbi);
205 SIT_I(sbi)->max_mtime = se->mtime;
207 /* Update valid block bitmap */
209 if (f2fs_set_bit(offset, se->cur_valid_map))
212 if (!f2fs_clear_bit(offset, se->cur_valid_map))
215 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
216 se->ckpt_valid_blocks += del;
218 __mark_sit_entry_dirty(sbi, segno);
220 /* update total number of valid blocks to be written in ckpt area */
221 SIT_I(sbi)->written_valid_blocks += del;
223 if (sbi->segs_per_sec > 1)
224 get_sec_entry(sbi, segno)->valid_blocks += del;
227 static void refresh_sit_entry(struct f2fs_sb_info *sbi,
228 block_t old_blkaddr, block_t new_blkaddr)
230 update_sit_entry(sbi, new_blkaddr, 1);
231 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
232 update_sit_entry(sbi, old_blkaddr, -1);
235 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
237 unsigned int segno = GET_SEGNO(sbi, addr);
238 struct sit_info *sit_i = SIT_I(sbi);
240 BUG_ON(addr == NULL_ADDR);
241 if (addr == NEW_ADDR)
244 /* add it into sit main buffer */
245 mutex_lock(&sit_i->sentry_lock);
247 update_sit_entry(sbi, addr, -1);
249 /* add it into dirty seglist */
250 locate_dirty_segment(sbi, segno);
252 mutex_unlock(&sit_i->sentry_lock);
/*
 * This function should be called with curseg_mutex held.
 */
258 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
259 struct f2fs_summary *sum, unsigned short offset)
261 struct curseg_info *curseg = CURSEG_I(sbi, type);
262 void *addr = curseg->sum_blk;
263 addr += offset * sizeof(struct f2fs_summary);
264 memcpy(addr, sum, sizeof(struct f2fs_summary));
269 * Calculate the number of current summary pages for writing
271 int npages_for_summary_flush(struct f2fs_sb_info *sbi)
273 int total_size_bytes = 0;
274 int valid_sum_count = 0;
277 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
278 if (sbi->ckpt->alloc_type[i] == SSR)
279 valid_sum_count += sbi->blocks_per_seg;
281 valid_sum_count += curseg_blkoff(sbi, i);
284 total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
285 + sizeof(struct nat_journal) + 2
286 + sizeof(struct sit_journal) + 2;
287 sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
288 if (total_size_bytes < sum_space)
290 else if (total_size_bytes < 2 * sum_space)
296 * Caller should put this summary page
298 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
300 return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
303 static void write_sum_page(struct f2fs_sb_info *sbi,
304 struct f2fs_summary_block *sum_blk, block_t blk_addr)
306 struct page *page = grab_meta_page(sbi, blk_addr);
307 void *kaddr = page_address(page);
308 memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
309 set_page_dirty(page);
310 f2fs_put_page(page, 1);
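/*
 * Look for a whole prefree section that the given log type may reuse for its
 * next allocation. Returns the first segment number of such a section, or
 * NULL_SEGNO when none is suitable.
 */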
313 static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
315 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
316 unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
318 unsigned int ofs = 0;
	/*
	 * If there are not enough reserved sections,
	 * we should not reuse prefree segments.
	 */
	if (has_not_enough_free_secs(sbi, 0))
		return NULL_SEGNO;

	/*
	 * NODE pages should not reuse prefree segments, since that
	 * information is needed for sudden-power-off recovery (SPOR).
	 */
	if (IS_NODESEG(type))
		return NULL_SEGNO;
334 segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
335 ofs += sbi->segs_per_sec;
337 if (segno < TOTAL_SEGS(sbi)) {
340 /* skip intermediate segments in a section */
341 if (segno % sbi->segs_per_sec)
344 /* skip if the section is currently used */
345 if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
348 /* skip if whole section is not prefree */
349 for (i = 1; i < sbi->segs_per_sec; i++)
350 if (!test_bit(segno + i, prefree_segmap))
353 /* skip if whole section was not free at the last checkpoint */
354 for (i = 0; i < sbi->segs_per_sec; i++)
355 if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
363 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
365 struct curseg_info *curseg = CURSEG_I(sbi, type);
366 unsigned int segno = curseg->segno;
367 struct free_segmap_info *free_i = FREE_I(sbi);
369 if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
370 return !test_bit(segno + 1, free_i->free_segmap);
/*
 * Find a new segment from the free segment bitmap, honoring the requested
 * allocation direction. This function must succeed, otherwise BUG.
 */
378 static void get_new_segment(struct f2fs_sb_info *sbi,
379 unsigned int *newseg, bool new_sec, int dir)
381 struct free_segmap_info *free_i = FREE_I(sbi);
382 unsigned int segno, secno, zoneno;
383 unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
384 unsigned int hint = *newseg / sbi->segs_per_sec;
385 unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
386 unsigned int left_start = hint;
391 write_lock(&free_i->segmap_lock);
393 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
394 segno = find_next_zero_bit(free_i->free_segmap,
395 TOTAL_SEGS(sbi), *newseg + 1);
396 if (segno - *newseg < sbi->segs_per_sec -
397 (*newseg % sbi->segs_per_sec))
401 secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
402 if (secno >= TOTAL_SECS(sbi)) {
403 if (dir == ALLOC_RIGHT) {
404 secno = find_next_zero_bit(free_i->free_secmap,
406 BUG_ON(secno >= TOTAL_SECS(sbi));
409 left_start = hint - 1;
415 while (test_bit(left_start, free_i->free_secmap)) {
416 if (left_start > 0) {
420 left_start = find_next_zero_bit(free_i->free_secmap,
422 BUG_ON(left_start >= TOTAL_SECS(sbi));
428 segno = secno * sbi->segs_per_sec;
429 zoneno = secno / sbi->secs_per_zone;
431 /* give up on finding another zone */
434 if (sbi->secs_per_zone == 1)
436 if (zoneno == old_zoneno)
438 if (dir == ALLOC_LEFT) {
439 if (!go_left && zoneno + 1 >= total_zones)
441 if (go_left && zoneno == 0)
444 for (i = 0; i < NR_CURSEG_TYPE; i++)
445 if (CURSEG_I(sbi, i)->zone == zoneno)
448 if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
451 hint = zoneno * sbi->secs_per_zone - 1;
452 else if (zoneno + 1 >= total_zones)
455 hint = (zoneno + 1) * sbi->secs_per_zone;
457 goto find_other_zone;
460 /* set it as dirty segment in free segmap */
461 BUG_ON(test_bit(segno, free_i->free_segmap));
462 __set_inuse(sbi, segno);
464 write_unlock(&free_i->segmap_lock);
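/*
 * Switch the current segment to curseg->next_segno, reset the next block
 * offset and the summary footer, and record the segment type in its SIT
 * entry.
 */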
467 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
469 struct curseg_info *curseg = CURSEG_I(sbi, type);
470 struct summary_footer *sum_footer;
472 curseg->segno = curseg->next_segno;
473 curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
474 curseg->next_blkoff = 0;
475 curseg->next_segno = NULL_SEGNO;
477 sum_footer = &(curseg->sum_blk->footer);
478 memset(sum_footer, 0, sizeof(struct summary_footer));
479 if (IS_DATASEG(type))
480 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
481 if (IS_NODESEG(type))
482 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
483 __set_sit_entry_type(sbi, type, curseg->segno, modified);
487 * Allocate a current working segment.
488 * This function always allocates a free segment in LFS manner.
490 static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
492 struct curseg_info *curseg = CURSEG_I(sbi, type);
493 unsigned int segno = curseg->segno;
494 int dir = ALLOC_LEFT;
496 write_sum_page(sbi, curseg->sum_blk,
497 GET_SUM_BLOCK(sbi, curseg->segno));
498 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
501 if (test_opt(sbi, NOHEAP))
504 get_new_segment(sbi, &segno, new_sec, dir);
505 curseg->next_segno = segno;
506 reset_curseg(sbi, type, 1);
507 curseg->alloc_type = LFS;
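/*
 * Find the next block offset in @seg that is unused in both the checkpointed
 * and the current valid block bitmaps (used when allocating in SSR manner).
 */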
510 static void __next_free_blkoff(struct f2fs_sb_info *sbi,
511 struct curseg_info *seg, block_t start)
513 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
515 for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
516 if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
517 && !f2fs_test_bit(ofs, se->cur_valid_map))
520 seg->next_blkoff = ofs;
/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
528 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
529 struct curseg_info *seg)
531 if (seg->alloc_type == SSR)
532 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it needs to recover the existing summary information of the
 * valid blocks in that segment.
 */
541 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
543 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
544 struct curseg_info *curseg = CURSEG_I(sbi, type);
545 unsigned int new_segno = curseg->next_segno;
546 struct f2fs_summary_block *sum_node;
547 struct page *sum_page;
549 write_sum_page(sbi, curseg->sum_blk,
550 GET_SUM_BLOCK(sbi, curseg->segno));
551 __set_test_and_inuse(sbi, new_segno);
553 mutex_lock(&dirty_i->seglist_lock);
554 __remove_dirty_segment(sbi, new_segno, PRE);
555 __remove_dirty_segment(sbi, new_segno, DIRTY);
556 mutex_unlock(&dirty_i->seglist_lock);
558 reset_curseg(sbi, type, 1);
559 curseg->alloc_type = SSR;
560 __next_free_blkoff(sbi, curseg, 0);
563 sum_page = get_sum_page(sbi, new_segno);
564 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
565 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
566 f2fs_put_page(sum_page, 1);
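/*
 * Pick a dirty segment that can be reused in SSR manner via the victim
 * selection policy. For data logs under free-section pressure, the other
 * data log types are tried as well.
 */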
570 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
572 struct curseg_info *curseg = CURSEG_I(sbi, type);
573 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
575 if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
576 return v_ops->get_victim(sbi,
577 &(curseg)->next_segno, BG_GC, type, SSR);
579 /* For data segments, let's do SSR more intensively */
580 for (; type >= CURSEG_HOT_DATA; type--)
581 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed, otherwise BUG.
 */
591 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
592 int type, bool force)
594 struct curseg_info *curseg = CURSEG_I(sbi, type);
597 new_curseg(sbi, type, true);
601 curseg->next_segno = check_prefree_segments(sbi, type);
603 if (curseg->next_segno != NULL_SEGNO)
604 change_curseg(sbi, type, false);
605 else if (type == CURSEG_WARM_NODE)
606 new_curseg(sbi, type, false);
607 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
608 new_curseg(sbi, type, false);
609 else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
610 change_curseg(sbi, type, true);
612 new_curseg(sbi, type, false);
614 sbi->segment_count[curseg->alloc_type]++;
617 void allocate_new_segments(struct f2fs_sb_info *sbi)
619 struct curseg_info *curseg;
620 unsigned int old_curseg;
623 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
624 curseg = CURSEG_I(sbi, i);
625 old_curseg = curseg->segno;
626 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
627 locate_dirty_segment(sbi, old_curseg);
static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};
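/*
 * Write completion handler: for each page in the bio, an I/O error marks the
 * mapping with AS_EIO, sets CP_ERROR_FLAG in the checkpoint, and leaves the
 * filesystem read-only; writeback is then ended and the in-flight writeback
 * count is decreased.
 */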
635 static void f2fs_end_io_write(struct bio *bio, int err,
636 struct batch_complete *batch)
638 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
639 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
640 struct bio_private *p = bio->bi_private;
643 struct page *page = bvec->bv_page;
645 if (--bvec >= bio->bi_io_vec)
646 prefetchw(&bvec->bv_page->flags);
650 set_bit(AS_EIO, &page->mapping->flags);
651 set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
652 p->sbi->sb->s_flags |= MS_RDONLY;
654 end_page_writeback(page);
655 dec_page_count(p->sbi, F2FS_WRITEBACK);
656 } while (bvec >= bio->bi_io_vec);
664 struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
667 struct bio_private *priv;
669 priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
675 /* No failure on bio allocation */
676 bio = bio_alloc(GFP_NOIO, npages);
678 bio->bi_private = priv;
682 static void do_submit_bio(struct f2fs_sb_info *sbi,
683 enum page_type type, bool sync)
685 int rw = sync ? WRITE_SYNC : WRITE;
686 enum page_type btype = type > META ? META : type;
688 if (type >= META_FLUSH)
689 rw = WRITE_FLUSH_FUA;
691 if (sbi->bio[btype]) {
692 struct bio_private *p = sbi->bio[btype]->bi_private;
694 sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
695 if (type == META_FLUSH) {
696 DECLARE_COMPLETION_ONSTACK(wait);
699 submit_bio(rw, sbi->bio[btype]);
700 wait_for_completion(&wait);
703 submit_bio(rw, sbi->bio[btype]);
705 sbi->bio[btype] = NULL;
709 void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
711 down_write(&sbi->bio_sem);
712 do_submit_bio(sbi, type, sync);
713 up_write(&sbi->bio_sem);
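/*
 * Queue @page into the per-type cached bio. If the new block is not
 * contiguous with the last one, or the bio is full, the cached bio is
 * submitted first, so that consecutive blocks keep merging into one bio.
 */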
716 static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
717 block_t blk_addr, enum page_type type)
719 struct block_device *bdev = sbi->sb->s_bdev;
721 verify_block_addr(sbi, blk_addr);
723 down_write(&sbi->bio_sem);
725 inc_page_count(sbi, F2FS_WRITEBACK);
727 if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
728 do_submit_bio(sbi, type, false);
730 if (sbi->bio[type] == NULL) {
731 sbi->bio[type] = f2fs_bio_alloc(bdev, bio_get_nr_vecs(bdev));
732 sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		/*
		 * The end_io will be assigned at the submission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as
		 * much as possible.
		 */
740 if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
742 do_submit_bio(sbi, type, false);
746 sbi->last_block_in_bio[type] = blk_addr;
748 up_write(&sbi->bio_sem);
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}
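/*
 * Map a page to one of the active logs: with two logs everything is hot;
 * with four, data splits into hot (directories) and cold (files), and nodes
 * into hot (direct) and cold; with six, warm data and warm node logs are
 * used for the remaining cases as well.
 */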
759 static int __get_segment_type_2(struct page *page, enum page_type p_type)
762 return CURSEG_HOT_DATA;
764 return CURSEG_HOT_NODE;
767 static int __get_segment_type_4(struct page *page, enum page_type p_type)
769 if (p_type == DATA) {
770 struct inode *inode = page->mapping->host;
772 if (S_ISDIR(inode->i_mode))
773 return CURSEG_HOT_DATA;
775 return CURSEG_COLD_DATA;
777 if (IS_DNODE(page) && !is_cold_node(page))
778 return CURSEG_HOT_NODE;
780 return CURSEG_COLD_NODE;
784 static int __get_segment_type_6(struct page *page, enum page_type p_type)
786 if (p_type == DATA) {
787 struct inode *inode = page->mapping->host;
789 if (S_ISDIR(inode->i_mode))
790 return CURSEG_HOT_DATA;
791 else if (is_cold_data(page) || is_cold_file(inode))
792 return CURSEG_COLD_DATA;
794 return CURSEG_WARM_DATA;
797 return is_cold_node(page) ? CURSEG_WARM_NODE :
800 return CURSEG_COLD_NODE;
804 static int __get_segment_type(struct page *page, enum page_type p_type)
806 struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
807 switch (sbi->active_logs) {
809 return __get_segment_type_2(page, p_type);
811 return __get_segment_type_4(page, p_type);
813 /* NR_CURSEG_TYPE(6) logs by default */
814 BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
815 return __get_segment_type_6(page, p_type);
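/*
 * Allocate the next free block in the proper current segment, record the
 * summary entry, update the SIT entries of the old and new blocks, and
 * submit the page for write-out.
 */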
818 static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
819 block_t old_blkaddr, block_t *new_blkaddr,
820 struct f2fs_summary *sum, enum page_type p_type)
822 struct sit_info *sit_i = SIT_I(sbi);
823 struct curseg_info *curseg;
824 unsigned int old_cursegno;
827 type = __get_segment_type(page, p_type);
828 curseg = CURSEG_I(sbi, type);
830 mutex_lock(&curseg->curseg_mutex);
832 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
833 old_cursegno = curseg->segno;
	/*
	 * __add_sum_entry should be called with curseg_mutex held,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
840 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
842 mutex_lock(&sit_i->sentry_lock);
843 __refresh_next_blkoff(sbi, curseg);
844 sbi->block_count[curseg->alloc_type]++;
847 * SIT information should be updated before segment allocation,
848 * since SSR needs latest valid block information.
850 refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
852 if (!__has_curseg_space(sbi, type))
853 sit_i->s_ops->allocate_segment(sbi, type, false);
855 locate_dirty_segment(sbi, old_cursegno);
856 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
857 mutex_unlock(&sit_i->sentry_lock);
860 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
862 /* writeout dirty page into bdev */
863 submit_write_page(sbi, page, *new_blkaddr, p_type);
865 mutex_unlock(&curseg->curseg_mutex);
868 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
870 set_page_writeback(page);
871 submit_write_page(sbi, page, page->index, META);
874 void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
875 unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
877 struct f2fs_summary sum;
878 set_summary(&sum, nid, 0, 0);
879 do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
882 void write_data_page(struct inode *inode, struct page *page,
883 struct dnode_of_data *dn, block_t old_blkaddr,
884 block_t *new_blkaddr)
886 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
887 struct f2fs_summary sum;
890 BUG_ON(old_blkaddr == NULL_ADDR);
891 get_node_info(sbi, dn->nid, &ni);
892 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
894 do_write_page(sbi, page, old_blkaddr,
895 new_blkaddr, &sum, DATA);
898 void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
899 block_t old_blk_addr)
901 submit_write_page(sbi, page, old_blk_addr, DATA);
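/*
 * Used by roll-forward recovery: reserve @new_blkaddr for @page by
 * redirecting the proper data log to that segment and updating the summary
 * and SIT information accordingly.
 */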
904 void recover_data_page(struct f2fs_sb_info *sbi,
905 struct page *page, struct f2fs_summary *sum,
906 block_t old_blkaddr, block_t new_blkaddr)
908 struct sit_info *sit_i = SIT_I(sbi);
909 struct curseg_info *curseg;
910 unsigned int segno, old_cursegno;
911 struct seg_entry *se;
914 segno = GET_SEGNO(sbi, new_blkaddr);
915 se = get_seg_entry(sbi, segno);
918 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
919 if (old_blkaddr == NULL_ADDR)
920 type = CURSEG_COLD_DATA;
922 type = CURSEG_WARM_DATA;
924 curseg = CURSEG_I(sbi, type);
926 mutex_lock(&curseg->curseg_mutex);
927 mutex_lock(&sit_i->sentry_lock);
929 old_cursegno = curseg->segno;
931 /* change the current segment */
932 if (segno != curseg->segno) {
933 curseg->next_segno = segno;
934 change_curseg(sbi, type, true);
937 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
938 (sbi->blocks_per_seg - 1);
939 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
941 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
943 locate_dirty_segment(sbi, old_cursegno);
944 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
946 mutex_unlock(&sit_i->sentry_lock);
947 mutex_unlock(&curseg->curseg_mutex);
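/*
 * Rewrite a node page in place at @new_blkaddr, then move the warm node log
 * to the next block address recorded in the node footer, so the node chain
 * stays consistent for recovery.
 */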
950 void rewrite_node_page(struct f2fs_sb_info *sbi,
951 struct page *page, struct f2fs_summary *sum,
952 block_t old_blkaddr, block_t new_blkaddr)
954 struct sit_info *sit_i = SIT_I(sbi);
955 int type = CURSEG_WARM_NODE;
956 struct curseg_info *curseg;
957 unsigned int segno, old_cursegno;
958 block_t next_blkaddr = next_blkaddr_of_node(page);
959 unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
961 curseg = CURSEG_I(sbi, type);
963 mutex_lock(&curseg->curseg_mutex);
964 mutex_lock(&sit_i->sentry_lock);
966 segno = GET_SEGNO(sbi, new_blkaddr);
967 old_cursegno = curseg->segno;
969 /* change the current segment */
970 if (segno != curseg->segno) {
971 curseg->next_segno = segno;
972 change_curseg(sbi, type, true);
974 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
975 (sbi->blocks_per_seg - 1);
976 __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
978 /* change the current log to the next block addr in advance */
979 if (next_segno != segno) {
980 curseg->next_segno = next_segno;
981 change_curseg(sbi, type, true);
983 curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
984 (sbi->blocks_per_seg - 1);
986 /* rewrite node page */
987 set_page_writeback(page);
988 submit_write_page(sbi, page, new_blkaddr, NODE);
989 f2fs_submit_bio(sbi, NODE, true);
990 refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
992 locate_dirty_segment(sbi, old_cursegno);
993 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
995 mutex_unlock(&sit_i->sentry_lock);
996 mutex_unlock(&curseg->curseg_mutex);
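/*
 * Compacted summary layout (written by write_compacted_summaries): the NAT
 * journal, then the SIT journal, then the summary entries of the three data
 * logs packed back to back across meta pages.
 */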
999 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1001 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1002 struct curseg_info *seg_i;
1003 unsigned char *kaddr;
1008 start = start_sum_block(sbi);
1010 page = get_meta_page(sbi, start++);
1011 kaddr = (unsigned char *)page_address(page);
1013 /* Step 1: restore nat cache */
1014 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1015 memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
1017 /* Step 2: restore sit cache */
1018 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1019 memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
1021 offset = 2 * SUM_JOURNAL_SIZE;
1023 /* Step 3: restore summary entries */
1024 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1025 unsigned short blk_off;
1028 seg_i = CURSEG_I(sbi, i);
1029 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
1030 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
1031 seg_i->next_segno = segno;
1032 reset_curseg(sbi, i, 0);
1033 seg_i->alloc_type = ckpt->alloc_type[i];
1034 seg_i->next_blkoff = blk_off;
1036 if (seg_i->alloc_type == SSR)
1037 blk_off = sbi->blocks_per_seg;
1039 for (j = 0; j < blk_off; j++) {
1040 struct f2fs_summary *s;
1041 s = (struct f2fs_summary *)(kaddr + offset);
1042 seg_i->sum_blk->entries[j] = *s;
1043 offset += SUMMARY_SIZE;
1044 if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1048 f2fs_put_page(page, 1);
1051 page = get_meta_page(sbi, start++);
1052 kaddr = (unsigned char *)page_address(page);
1056 f2fs_put_page(page, 1);
1060 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1062 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1063 struct f2fs_summary_block *sum;
1064 struct curseg_info *curseg;
1066 unsigned short blk_off;
1067 unsigned int segno = 0;
1068 block_t blk_addr = 0;
1070 /* get segment number and block addr */
1071 if (IS_DATASEG(type)) {
1072 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
1073 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
1075 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1076 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1078 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1080 segno = le32_to_cpu(ckpt->cur_node_segno[type -
1082 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
1084 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
1085 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1086 type - CURSEG_HOT_NODE);
1088 blk_addr = GET_SUM_BLOCK(sbi, segno);
1091 new = get_meta_page(sbi, blk_addr);
1092 sum = (struct f2fs_summary_block *)page_address(new);
1094 if (IS_NODESEG(type)) {
1095 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
1096 struct f2fs_summary *ns = &sum->entries[0];
1098 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
1100 ns->ofs_in_node = 0;
1103 if (restore_node_summary(sbi, segno, sum)) {
1104 f2fs_put_page(new, 1);
1110 /* set uncompleted segment to curseg */
1111 curseg = CURSEG_I(sbi, type);
1112 mutex_lock(&curseg->curseg_mutex);
1113 memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
1114 curseg->next_segno = segno;
1115 reset_curseg(sbi, type, 0);
1116 curseg->alloc_type = ckpt->alloc_type[type];
1117 curseg->next_blkoff = blk_off;
1118 mutex_unlock(&curseg->curseg_mutex);
1119 f2fs_put_page(new, 1);
1123 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
1125 int type = CURSEG_HOT_DATA;
1127 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1128 /* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	for (; type <= CURSEG_COLD_NODE; type++)
		if (read_normal_summaries(sbi, type))
			return -EINVAL;

	return 0;
}
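/*
 * Write the compacted data summaries starting at @blkaddr, mirroring the
 * layout that read_compacted_summaries() expects.
 */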
1140 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
1143 unsigned char *kaddr;
1144 struct f2fs_summary *summary;
1145 struct curseg_info *seg_i;
1146 int written_size = 0;
1149 page = grab_meta_page(sbi, blkaddr++);
1150 kaddr = (unsigned char *)page_address(page);
1152 /* Step 1: write nat cache */
1153 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
1154 memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
1155 written_size += SUM_JOURNAL_SIZE;
1157 /* Step 2: write sit cache */
1158 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
1159 memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
1161 written_size += SUM_JOURNAL_SIZE;
1163 set_page_dirty(page);
1165 /* Step 3: write summary entries */
1166 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1167 unsigned short blkoff;
1168 seg_i = CURSEG_I(sbi, i);
1169 if (sbi->ckpt->alloc_type[i] == SSR)
1170 blkoff = sbi->blocks_per_seg;
1172 blkoff = curseg_blkoff(sbi, i);
1174 for (j = 0; j < blkoff; j++) {
1176 page = grab_meta_page(sbi, blkaddr++);
1177 kaddr = (unsigned char *)page_address(page);
1180 summary = (struct f2fs_summary *)(kaddr + written_size);
1181 *summary = seg_i->sum_blk->entries[j];
1182 written_size += SUMMARY_SIZE;
1183 set_page_dirty(page);
1185 if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
1189 f2fs_put_page(page, 1);
1194 f2fs_put_page(page, 1);
1197 static void write_normal_summaries(struct f2fs_sb_info *sbi,
1198 block_t blkaddr, int type)
1201 if (IS_DATASEG(type))
1202 end = type + NR_CURSEG_DATA_TYPE;
1204 end = type + NR_CURSEG_NODE_TYPE;
1206 for (i = type; i < end; i++) {
1207 struct curseg_info *sum = CURSEG_I(sbi, i);
1208 mutex_lock(&sum->curseg_mutex);
1209 write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
1210 mutex_unlock(&sum->curseg_mutex);
1214 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1216 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
1217 write_compacted_summaries(sbi, start_blk);
1219 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
1222 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
1224 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
1225 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
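/*
 * Look up @val (a nid or a segno) in the NAT or SIT journal of the current
 * summary block. Returns its index, a newly allocated slot when @alloc is
 * set and space remains, or -1 otherwise.
 */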
1229 int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
1230 unsigned int val, int alloc)
1234 if (type == NAT_JOURNAL) {
1235 for (i = 0; i < nats_in_cursum(sum); i++) {
1236 if (le32_to_cpu(nid_in_journal(sum, i)) == val)
1239 if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
1240 return update_nats_in_cursum(sum, 1);
1241 } else if (type == SIT_JOURNAL) {
1242 for (i = 0; i < sits_in_cursum(sum); i++)
1243 if (le32_to_cpu(segno_in_journal(sum, i)) == val)
1245 if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
1246 return update_sits_in_cursum(sum, 1);
1251 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
1254 struct sit_info *sit_i = SIT_I(sbi);
1255 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1256 block_t blk_addr = sit_i->sit_base_addr + offset;
1258 check_seg_range(sbi, segno);
1260 /* calculate sit block address */
1261 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1262 blk_addr += sit_i->sit_blocks;
1264 return get_meta_page(sbi, blk_addr);
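/*
 * SIT blocks are double buffered: copy the current SIT block covering @start
 * into its alternate location and flip the SIT bitmap bit, so the updated
 * block is written to the other copy at this checkpoint.
 */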
1267 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
1270 struct sit_info *sit_i = SIT_I(sbi);
1271 struct page *src_page, *dst_page;
1272 pgoff_t src_off, dst_off;
1273 void *src_addr, *dst_addr;
1275 src_off = current_sit_addr(sbi, start);
1276 dst_off = next_sit_addr(sbi, src_off);
1278 /* get current sit block page without lock */
1279 src_page = get_meta_page(sbi, src_off);
1280 dst_page = grab_meta_page(sbi, dst_off);
1281 BUG_ON(PageDirty(src_page));
1283 src_addr = page_address(src_page);
1284 dst_addr = page_address(dst_page);
1285 memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
1287 set_page_dirty(dst_page);
1288 f2fs_put_page(src_page, 1);
	set_to_next_sit(sit_i, start);

	return dst_page;
}
1295 static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
1297 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1298 struct f2fs_summary_block *sum = curseg->sum_blk;
	/*
	 * If the journal area in the current summary is full of sit entries,
	 * all the sit entries will be flushed. Otherwise the sit entries
	 * cannot be replaced with newly hot sit entries.
	 */
1306 if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
1307 for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
1309 segno = le32_to_cpu(segno_in_journal(sum, i));
1310 __mark_sit_entry_dirty(sbi, segno);
1312 update_sits_in_cursum(sum, -sits_in_cursum(sum));
/*
 * The checkpoint procedure calls this function, which flushes SIT entries
 * including those in the SIT journal, and moves prefree segments to free
 * segments.
 */
1322 void flush_sit_entries(struct f2fs_sb_info *sbi)
1324 struct sit_info *sit_i = SIT_I(sbi);
1325 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1326 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1327 struct f2fs_summary_block *sum = curseg->sum_blk;
1328 unsigned long nsegs = TOTAL_SEGS(sbi);
1329 struct page *page = NULL;
1330 struct f2fs_sit_block *raw_sit = NULL;
1331 unsigned int start = 0, end = 0;
1332 unsigned int segno = -1;
1335 mutex_lock(&curseg->curseg_mutex);
1336 mutex_lock(&sit_i->sentry_lock);
1339 * "flushed" indicates whether sit entries in journal are flushed
1340 * to the SIT area or not.
1342 flushed = flush_sits_in_journal(sbi);
1344 while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
1345 struct seg_entry *se = get_seg_entry(sbi, segno);
1346 int sit_offset, offset;
1348 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1353 offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
1355 segno_in_journal(sum, offset) = cpu_to_le32(segno);
1356 seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
1360 if (!page || (start > segno) || (segno > end)) {
1362 f2fs_put_page(page, 1);
1366 start = START_SEGNO(sit_i, segno);
1367 end = start + SIT_ENTRY_PER_BLOCK - 1;
1369 /* read sit block that will be updated */
1370 page = get_next_sit_page(sbi, start);
1371 raw_sit = page_address(page);
		/* update entry in SIT block */
1375 seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
1377 __clear_bit(segno, bitmap);
1378 sit_i->dirty_sentries--;
1380 mutex_unlock(&sit_i->sentry_lock);
1381 mutex_unlock(&curseg->curseg_mutex);
1383 /* writeout last modified SIT block */
1384 f2fs_put_page(page, 1);
1386 set_prefree_as_free_segments(sbi);
1389 static int build_sit_info(struct f2fs_sb_info *sbi)
1391 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1392 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1393 struct sit_info *sit_i;
1394 unsigned int sit_segs, start;
1395 char *src_bitmap, *dst_bitmap;
1396 unsigned int bitmap_size;
1398 /* allocate memory for SIT information */
1399 sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
1403 SM_I(sbi)->sit_info = sit_i;
1405 sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
1406 if (!sit_i->sentries)
1409 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1410 sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1411 if (!sit_i->dirty_sentries_bitmap)
1414 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1415 sit_i->sentries[start].cur_valid_map
1416 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1417 sit_i->sentries[start].ckpt_valid_map
1418 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
1419 if (!sit_i->sentries[start].cur_valid_map
1420 || !sit_i->sentries[start].ckpt_valid_map)
1424 if (sbi->segs_per_sec > 1) {
1425 sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
1426 sizeof(struct sec_entry));
1427 if (!sit_i->sec_entries)
1431 /* get information related with SIT */
1432 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
	/* set up the SIT bitmap from the checkpoint pack */
1435 bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
1436 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
1438 dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
1442 /* init SIT information */
1443 sit_i->s_ops = &default_salloc_ops;
1445 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
1446 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
1447 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
1448 sit_i->sit_bitmap = dst_bitmap;
1449 sit_i->bitmap_size = bitmap_size;
1450 sit_i->dirty_sentries = 0;
1451 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
1452 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
1453 sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
1454 mutex_init(&sit_i->sentry_lock);
1458 static int build_free_segmap(struct f2fs_sb_info *sbi)
1460 struct f2fs_sm_info *sm_info = SM_I(sbi);
1461 struct free_segmap_info *free_i;
1462 unsigned int bitmap_size, sec_bitmap_size;
1464 /* allocate memory for free segmap information */
1465 free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
1469 SM_I(sbi)->free_info = free_i;
1471 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1472 free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
1473 if (!free_i->free_segmap)
1476 sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1477 free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
1478 if (!free_i->free_secmap)
1481 /* set all segments as dirty temporarily */
1482 memset(free_i->free_segmap, 0xff, bitmap_size);
1483 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
1485 /* init free segmap information */
1486 free_i->start_segno =
1487 (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
1488 free_i->free_segments = 0;
1489 free_i->free_sections = 0;
1490 rwlock_init(&free_i->segmap_lock);
1494 static int build_curseg(struct f2fs_sb_info *sbi)
1496 struct curseg_info *array;
1499 array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
1503 SM_I(sbi)->curseg_array = array;
1505 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1506 mutex_init(&array[i].curseg_mutex);
1507 array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
1508 if (!array[i].sum_blk)
1510 array[i].segno = NULL_SEGNO;
1511 array[i].next_blkoff = 0;
1513 return restore_curseg_summaries(sbi);
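/*
 * Populate the in-memory seg_entry array, preferring SIT entries cached in
 * the cold data summary journal over the on-disk SIT blocks.
 */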
1516 static void build_sit_entries(struct f2fs_sb_info *sbi)
1518 struct sit_info *sit_i = SIT_I(sbi);
1519 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1520 struct f2fs_summary_block *sum = curseg->sum_blk;
1523 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1524 struct seg_entry *se = &sit_i->sentries[start];
1525 struct f2fs_sit_block *sit_blk;
1526 struct f2fs_sit_entry sit;
1530 mutex_lock(&curseg->curseg_mutex);
1531 for (i = 0; i < sits_in_cursum(sum); i++) {
1532 if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
1533 sit = sit_in_journal(sum, i);
1534 mutex_unlock(&curseg->curseg_mutex);
1538 mutex_unlock(&curseg->curseg_mutex);
1539 page = get_current_sit_page(sbi, start);
1540 sit_blk = (struct f2fs_sit_block *)page_address(page);
1541 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
1542 f2fs_put_page(page, 1);
1544 check_block_count(sbi, start, &sit);
1545 seg_info_from_raw_sit(se, &sit);
1546 if (sbi->segs_per_sec > 1) {
1547 struct sec_entry *e = get_sec_entry(sbi, start);
1548 e->valid_blocks += se->valid_blocks;
1553 static void init_free_segmap(struct f2fs_sb_info *sbi)
1558 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1559 struct seg_entry *sentry = get_seg_entry(sbi, start);
1560 if (!sentry->valid_blocks)
1561 __set_free(sbi, start);
	/* set the current segments as in use */
1565 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
1566 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
1567 __set_test_and_inuse(sbi, curseg_t->segno);
1571 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
1573 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1574 struct free_segmap_info *free_i = FREE_I(sbi);
1575 unsigned int segno = 0, offset = 0;
1576 unsigned short valid_blocks;
1578 while (segno < TOTAL_SEGS(sbi)) {
1579 /* find dirty segment based on free segmap */
1580 segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
1581 if (segno >= TOTAL_SEGS(sbi))
1584 valid_blocks = get_valid_blocks(sbi, segno, 0);
1585 if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
1587 mutex_lock(&dirty_i->seglist_lock);
1588 __locate_dirty_segment(sbi, segno, DIRTY);
1589 mutex_unlock(&dirty_i->seglist_lock);
1593 static int init_victim_secmap(struct f2fs_sb_info *sbi)
1595 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1596 unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
1598 dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
1599 if (!dirty_i->victim_secmap)
1604 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
1606 struct dirty_seglist_info *dirty_i;
1607 unsigned int bitmap_size, i;
1609 /* allocate memory for dirty segments list information */
1610 dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
1614 SM_I(sbi)->dirty_info = dirty_i;
1615 mutex_init(&dirty_i->seglist_lock);
1617 bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
1619 for (i = 0; i < NR_DIRTY_TYPE; i++) {
1620 dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
1621 if (!dirty_i->dirty_segmap[i])
1625 init_dirty_segmap(sbi);
1626 return init_victim_secmap(sbi);
1630 * Update min, max modified time for cost-benefit GC algorithm
1632 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
1634 struct sit_info *sit_i = SIT_I(sbi);
1637 mutex_lock(&sit_i->sentry_lock);
1639 sit_i->min_mtime = LLONG_MAX;
1641 for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
1643 unsigned long long mtime = 0;
1645 for (i = 0; i < sbi->segs_per_sec; i++)
1646 mtime += get_seg_entry(sbi, segno + i)->mtime;
1648 mtime = div_u64(mtime, sbi->segs_per_sec);
1650 if (sit_i->min_mtime > mtime)
1651 sit_i->min_mtime = mtime;
1653 sit_i->max_mtime = get_mtime(sbi);
1654 mutex_unlock(&sit_i->sentry_lock);
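/*
 * Top-level constructor of the segment manager: read geometry from the
 * superblock and checkpoint, build the SIT, free segmap, current segments,
 * and dirty segmap, and finally initialize the mtime range used by the
 * cost-benefit GC.
 */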
1657 int build_segment_manager(struct f2fs_sb_info *sbi)
1659 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
1660 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1661 struct f2fs_sm_info *sm_info;
1664 sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
1669 sbi->sm_info = sm_info;
1670 INIT_LIST_HEAD(&sm_info->wblist_head);
1671 spin_lock_init(&sm_info->wblist_lock);
1672 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1673 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1674 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
1675 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
1676 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
1677 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
1678 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
1680 err = build_sit_info(sbi);
1683 err = build_free_segmap(sbi);
1686 err = build_curseg(sbi);
1690 /* reinit free segmap based on SIT */
1691 build_sit_entries(sbi);
1693 init_free_segmap(sbi);
1694 err = build_dirty_segmap(sbi);
1698 init_min_max_mtime(sbi);
1702 static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
1703 enum dirty_type dirty_type)
1705 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1707 mutex_lock(&dirty_i->seglist_lock);
1708 kfree(dirty_i->dirty_segmap[dirty_type]);
1709 dirty_i->nr_dirty[dirty_type] = 0;
1710 mutex_unlock(&dirty_i->seglist_lock);
1713 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
1715 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1716 kfree(dirty_i->victim_secmap);
1719 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
1721 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1727 /* discard pre-free/dirty segments list */
1728 for (i = 0; i < NR_DIRTY_TYPE; i++)
1729 discard_dirty_segmap(sbi, i);
1731 destroy_victim_secmap(sbi);
1732 SM_I(sbi)->dirty_info = NULL;
1736 static void destroy_curseg(struct f2fs_sb_info *sbi)
1738 struct curseg_info *array = SM_I(sbi)->curseg_array;
1743 SM_I(sbi)->curseg_array = NULL;
1744 for (i = 0; i < NR_CURSEG_TYPE; i++)
1745 kfree(array[i].sum_blk);
1749 static void destroy_free_segmap(struct f2fs_sb_info *sbi)
1751 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
1754 SM_I(sbi)->free_info = NULL;
1755 kfree(free_i->free_segmap);
1756 kfree(free_i->free_secmap);
1760 static void destroy_sit_info(struct f2fs_sb_info *sbi)
1762 struct sit_info *sit_i = SIT_I(sbi);
1768 if (sit_i->sentries) {
1769 for (start = 0; start < TOTAL_SEGS(sbi); start++) {
1770 kfree(sit_i->sentries[start].cur_valid_map);
1771 kfree(sit_i->sentries[start].ckpt_valid_map);
1774 vfree(sit_i->sentries);
1775 vfree(sit_i->sec_entries);
1776 kfree(sit_i->dirty_sentries_bitmap);
1778 SM_I(sbi)->sit_info = NULL;
1779 kfree(sit_i->sit_bitmap);
1783 void destroy_segment_manager(struct f2fs_sb_info *sbi)
1785 struct f2fs_sm_info *sm_info = SM_I(sbi);
1786 destroy_dirty_segmap(sbi);
1787 destroy_curseg(sbi);
1788 destroy_free_segmap(sbi);
1789 destroy_sit_info(sbi);
1790 sbi->sm_info = NULL;