/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *winode_slab;
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
						kthread_should_stop(),
						msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some victim segments may be invalidated soon
		 * afterwards by user updates or deletions. So we wait a
		 * while to let dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

#ifdef CONFIG_F2FS_STAT_FS
		sbi->bg_gc++;
#endif

		/* if the return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;
	} while (!kthread_should_stop());
	return 0;
}
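/*
 * A minimal sketch of the adaptive back-off above, assuming the gc.h
 * helpers behave as their names suggest (stepping wait_ms up or down and
 * clamping it between min_sleep_time and max_sleep_time; the exact step
 * size lives in gc.h, not in this file):
 *
 *	wait_ms = gc_th->min_sleep_time;	at thread start
 *	increase_sleep_time()			when frozen or busy; repeated
 *						increases saturate at
 *						gc_th->max_sleep_time
 *	decrease_sleep_time()			when enough invalid blocks
 *						exist; GC runs sooner, down to
 *						gc_th->min_sleep_time
 *	wait_ms = gc_th->no_gc_sleep_time;	when no victim was found
 */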
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
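/*
 * Note that start_gc_thread() is a no-op unless background GC was
 * requested at mount time (test_opt(sbi, BG_GC); presumably via the
 * background_gc mount option).
 */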
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
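/*
 * In short: background GC defaults to cost-benefit (GC_CB) and foreground
 * GC to greedy (GC_GREEDY), while a nonzero gc_idle forces one policy for
 * both (1 = always cost-benefit, 2 = always greedy). That gc_idle is an
 * externally tunable knob is an assumption here; only the numeric mapping
 * above comes from this file.
 */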
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}
	p->offset = sbi->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim sections
	 * already chosen by background GC, since those sections are
	 * guaranteed to have few valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
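/*
 * A worked example of the cost-benefit value above: for a section whose
 * blocks are 20% valid (u = 20) and whose average mtime sits midway
 * between min_mtime and max_mtime (age = 50), the benefit term is
 * 100 * (100 - 20) * 50 / (100 + 20) = 3333, so the returned cost is
 * UINT_MAX - 3333. Colder and emptier sections yield larger benefits,
 * hence smaller costs, and get_victim_by_default() below keeps the
 * minimum-cost candidate.
 */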
static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
			struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment and does
 * not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment with
 * the minimum number of valid blocks and removes it from the dirty
 * seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}
		p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}

		if (cost == max_cost)
			continue;

		if (nsearched++ >= MAX_VICTIM_SEARCH) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
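/*
 * The scan above is bounded: once nsearched exceeds MAX_VICTIM_SEARCH,
 * the cheapest candidate seen so far wins, and sbi->last_victim[]
 * remembers the stopping point so the next call resumes the sweep there
 * instead of rescanning the dirty segmap from segment zero.
 */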
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}
static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}
repeat:
	new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
	if (!new_ie) {
		cond_resched();
		goto repeat;
	}
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}
static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares the node address recorded in the summary block
 * with the one in the NAT. If they match, the node is valid and is
 * marked cold and written out; otherwise (a stale node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_submit_bio(sbi, NODE, true);
			wait_on_page_writeback(node_page);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this
		 * victim completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}
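/*
 * Note that gc_node_segment() walks the summary entries twice: the first
 * (initial) pass only issues ra_node_page() readahead so the node pages
 * stream in, and the second pass performs the actual migration.
 * gc_data_segment() below stretches the same idea across four phases.
 */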
/*
 * Calculate the start block index of the data covered by a given node
 * offset. Be careful: callers must pass only offsets of direct node
 * blocks. Passing the offset of any other node block type, such as an
 * indirect or double indirect node block, is a caller's mistake.
 */
block_t start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}
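/*
 * A worked example, assuming the node offset layout this arithmetic
 * encodes (node_ofs 0 = inode block, 1 and 2 = the two direct node
 * blocks, 3 = the first indirect node block, and so on):
 *
 *	start_bidx_of_node(1) = 0 * ADDRS_PER_BLOCK + ADDRS_PER_INODE
 *	start_bidx_of_node(2) = 1 * ADDRS_PER_BLOCK + ADDRS_PER_INODE
 *	start_bidx_of_node(4) = 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE
 *		(dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0; bidx = 4 - 2 - 0)
 *
 * Offsets 0 and 3 never reach the arithmetic: 0 returns early, and 3 is
 * an indirect node block, which callers must not pass in.
 */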
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		if (PageWriteback(page)) {
			f2fs_submit_bio(sbi, DATA, true);
			wait_on_page_writeback(page);
		}

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is marked cold
 * and copied, and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino, checking dnode validity first */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		start_bidx = start_bidx_of_node(nofs);
		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else { /* phase == 3 */
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this
		 * victim completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}
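/*
 * Callers are expected to enter f2fs_gc() holding sbi->gc_mutex (the
 * background thread takes it with mutex_trylock() in gc_thread_func());
 * it is released on every exit path above. The return value is 0 when at
 * least one victim was collected and -1 when none could be selected.
 */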
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}
int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}
void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}