f2fs: shrink free_nids entries
author    Chao Yu <chao2.yu@samsung.com>
          Tue, 28 Jul 2015 10:33:46 +0000 (18:33 +0800)
committer Jaegeuk Kim <jaegeuk@kernel.org>
          Thu, 20 Aug 2015 16:00:06 +0000 (09:00 -0700)
This patch introduces __count_free_nids/try_to_free_nids and registers them
with the slab shrinker so that cached free nids can be reclaimed under memory
pressure.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
fs/f2fs/f2fs.h
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/shrinker.c
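
For context (not part of this patch): f2fs_shrink_count/f2fs_shrink_scan, extended
in the shrinker.c hunks below, are hooked into the kernel's slab shrinker when the
module loads. A minimal sketch of how that registration looks around this kernel
version (in fs/f2fs/super.c); names and placement are as in the then-current tree,
shown here only for illustration:

static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,	/* free up to sc->nr_to_scan cached objects */
	.count_objects = f2fs_shrink_count,	/* report how many objects are reclaimable */
	.seeks = DEFAULT_SEEKS,
};

static int __init init_f2fs_fs(void)
{
	/* ... other module init ... */
	register_shrinker(&f2fs_shrinker_info);
	/* ... */
	return 0;
}

With this patch, the count returned by f2fs_shrink_count() also covers surplus free
nids (anything above one NAT block's worth, NAT_ENTRY_PER_BLOCK), so memory pressure
can now trim that cache in addition to clean NAT and extent cache entries.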

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cc07b1595a927dc719a389491cc5e2c2cd48077e..23bfc0ccaf10a8fe3d4181a6b55985a20945d66f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1681,6 +1681,7 @@ int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
 void recover_inline_xattr(struct inode *, struct page *);
 void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ac9110788b175d64591b1a0ad711fc810060344d..6e10c2a08ec6f4711eeec30a9378a81e0967662c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1635,6 +1635,34 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
                kmem_cache_free(free_nid_slab, i);
 }
 
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct free_nid *i, *next;
+       int nr = nr_shrink;
+
+       if (!mutex_trylock(&nm_i->build_lock))
+               return 0;
+
+       spin_lock(&nm_i->free_nid_list_lock);
+       list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+               if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+                       break;
+               if (i->state == NID_ALLOC)
+                       continue;
+               __del_from_free_nid_list(nm_i, i);
+               nm_i->fcnt--;
+               spin_unlock(&nm_i->free_nid_list_lock);
+               kmem_cache_free(free_nid_slab, i);
+               nr_shrink--;
+               spin_lock(&nm_i->free_nid_list_lock);
+       }
+       spin_unlock(&nm_i->free_nid_list_lock);
+       mutex_unlock(&nm_i->build_lock);
+
+       return nr - nr_shrink;
+}
+
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
        void *src_addr, *dst_addr;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index bf1605dbce93d6ee3ad389b26f8e3bd0fc9812f6..1b4265639f07ec88dc207a1ff64bd6d60bc9a0ad 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -310,6 +310,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
        if (!available_free_memory(sbi, NAT_ENTRIES))
                try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 
+       if (!available_free_memory(sbi, FREE_NIDS))
+               try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+
        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
                        excess_prefree_segs(sbi) ||
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 9aa4235cd304ae3c66d7e5ddaffa9793da952bd6..da0d8e0b55a5d851dc893547f63f138b5281eb77 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -23,6 +23,13 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
        return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
 }
 
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+       if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
+               return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+       return 0;
+}
+
 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
 {
        return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
@@ -53,6 +60,9 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
                /* shrink clean nat cache entries */
                count += __count_nat_entries(sbi);
 
+               /* count free nids cache entries */
+               count += __count_free_nids(sbi);
+
                spin_lock(&f2fs_list_lock);
                p = p->next;
                mutex_unlock(&sbi->umount_mutex);
@@ -97,6 +107,10 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
                if (freed < nr)
                        freed += try_to_free_nats(sbi, nr - freed);
 
+               /* shrink free nids cache entries */
+               if (freed < nr)
+                       freed += try_to_free_nids(sbi, nr - freed);
+
                spin_lock(&f2fs_list_lock);
                p = p->next;
                list_move_tail(&sbi->s_list, &f2fs_list);