Btrfs: cache extent state when writing out dirty metadata pages
author     Josef Bacik <jbacik@fusionio.com>
           Thu, 27 Sep 2012 21:07:30 +0000 (17:07 -0400)
committer  Chris Mason <chris.mason@fusionio.com>
           Mon, 1 Oct 2012 19:35:05 +0000 (15:35 -0400)
Every time we write out dirty pages we search for an offset in the tree,
convert the bits in the state, and then when we wait we search for the
offset again and clear the bits.  So for every dirty range in the io tree
we are doing 4 rb-tree searches, which is suboptimal.  With this patch we
cache the extent state we find, so we only do 2 searches per cycle
(modulo weird things happening).  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
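
A minimal sketch of the caller pattern this enables, modeled on the
btrfs_write_marked_extents() hunk in fs/btrfs/transaction.c below;
write_dirty_ranges() is a hypothetical helper name and error handling is
trimmed, so treat it as an illustration rather than in-tree code:

/*
 * Hypothetical helper: write out all ranges with 'mark' set, reusing the
 * cached extent_state so convert_extent_bit() does not repeat the rb-tree
 * search that find_first_extent_bit() already did.
 */
static int write_dirty_ranges(struct extent_io_tree *dirty_pages,
			      struct address_space *mapping, int mark)
{
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;
	int err;
	int werr = 0;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		/* cached_state points at the state found just above */
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		/* reset the cache between iterations, as the hunk below does */
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	return werr;
}
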
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index aa02eab8c40b42f08a22e75d87aec247a32f43cf..c69995556f61f331728245607433b5c90e09064e 100644
@@ -3572,7 +3572,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 
        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-                                           mark);
+                                           mark, NULL);
                if (ret)
                        break;
 
@@ -3627,7 +3627,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 again:
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
-                                           EXTENT_DIRTY);
+                                           EXTENT_DIRTY, NULL);
                if (ret)
                        break;
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index efb044e61d752a24b502ee429cd13f2981efe250..65941d736c4cff198718e4a1053dfc2a2378ac96 100644
@@ -313,7 +313,8 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
-                                           EXTENT_DIRTY | EXTENT_UPTODATE);
+                                           EXTENT_DIRTY | EXTENT_UPTODATE,
+                                           NULL);
                if (ret)
                        break;
 
@@ -5028,7 +5029,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
-                                           EXTENT_DIRTY);
+                                           EXTENT_DIRTY, NULL);
                if (ret)
                        break;
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 979fa0d6bfee5d30be2430e9d6a6750925ee6ad9..e8ee39b733564098c49dfa9dd316ad24e9c0a76d 100644
@@ -937,6 +937,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
  * @end:       the end offset in bytes (inclusive)
  * @bits:      the bits to set in this range
  * @clear_bits:        the bits to clear in this range
+ * @cached_state:      state that we're going to cache
  * @mask:      the allocation mask
  *
  * This will go through and set bits for the given range.  If any states exist
@@ -946,7 +947,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
  * boundary bits like LOCK.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      int bits, int clear_bits, gfp_t mask)
+                      int bits, int clear_bits,
+                      struct extent_state **cached_state, gfp_t mask)
 {
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
@@ -963,6 +965,15 @@ again:
        }
 
        spin_lock(&tree->lock);
+       if (cached_state && *cached_state) {
+               state = *cached_state;
+               if (state->start <= start && state->end > start &&
+                   state->tree) {
+                       node = &state->rb_node;
+                       goto hit_next;
+               }
+       }
+
        /*
         * this search will find all the extents that end after
         * our range starts.
@@ -993,6 +1004,7 @@ hit_next:
         */
        if (state->start == start && state->end <= end) {
                set_state_bits(tree, state, &bits);
+               cache_state(state, cached_state);
                state = clear_state_bit(tree, state, &clear_bits, 0);
                if (last_end == (u64)-1)
                        goto out;
@@ -1033,6 +1045,7 @@ hit_next:
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, &bits);
+                       cache_state(state, cached_state);
                        state = clear_state_bit(tree, state, &clear_bits, 0);
                        if (last_end == (u64)-1)
                                goto out;
@@ -1071,6 +1084,7 @@ hit_next:
                                   &bits);
                if (err)
                        extent_io_tree_panic(tree, err);
+               cache_state(prealloc, cached_state);
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
@@ -1093,6 +1107,7 @@ hit_next:
                        extent_io_tree_panic(tree, err);
 
                set_state_bits(tree, prealloc, &bits);
+               cache_state(prealloc, cached_state);
                clear_state_bit(tree, prealloc, &clear_bits, 0);
                prealloc = NULL;
                goto out;
@@ -1297,18 +1312,42 @@ out:
  * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, int bits)
+                         u64 *start_ret, u64 *end_ret, int bits,
+                         struct extent_state **cached_state)
 {
        struct extent_state *state;
+       struct rb_node *n;
        int ret = 1;
 
        spin_lock(&tree->lock);
+       if (cached_state && *cached_state) {
+               state = *cached_state;
+               if (state->end == start - 1 && state->tree) {
+                       n = rb_next(&state->rb_node);
+                       while (n) {
+                               state = rb_entry(n, struct extent_state,
+                                                rb_node);
+                               if (state->state & bits)
+                                       goto got_it;
+                               n = rb_next(n);
+                       }
+                       free_extent_state(*cached_state);
+                       *cached_state = NULL;
+                       goto out;
+               }
+               free_extent_state(*cached_state);
+               *cached_state = NULL;
+       }
+
        state = find_first_extent_bit_state(tree, start, bits);
+got_it:
        if (state) {
+               cache_state(state, cached_state);
                *start_ret = state->start;
                *end_ret = state->end;
                ret = 0;
        }
+out:
        spin_unlock(&tree->lock);
        return ret;
 }
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index a69dea21904455a725bfb9c878e5c9f850a2863f..7aeb31087f88056e6a6eb729a04fe3c29c360a1a 100644
@@ -233,13 +233,15 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask);
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      int bits, int clear_bits, gfp_t mask);
+                      int bits, int clear_bits,
+                      struct extent_state **cached_state, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
                      struct extent_state **cached_state, gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, int bits);
+                         u64 *start_ret, u64 *end_ret, int bits,
+                         struct extent_state **cached_state);
 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
                                                 u64 start, int bits);
 int extent_invalidatepage(struct extent_io_tree *tree,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b107e68797f46f4e52c986ec4c1012c7f3541ce5..1027b854b90cec02b9d2328804bc12f23f9bc00b 100644
@@ -966,7 +966,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                               block_group->key.offset)) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
-                                           EXTENT_DIRTY);
+                                           EXTENT_DIRTY, NULL);
                if (ret) {
                        ret = 0;
                        break;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 6e530bb86c94fe6d60e7db87496b57b44d1a8c3f..776f0aa128fc56294dbed997d6a60768f05a0ee9 100644
@@ -3621,7 +3621,7 @@ next:
 
                ret = find_first_extent_bit(&rc->processed_blocks,
                                            key.objectid, &start, &end,
-                                           EXTENT_DIRTY);
+                                           EXTENT_DIRTY, NULL);
 
                if (ret == 0 && start <= key.objectid) {
                        btrfs_release_path(path);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 69139a356f7188593bb73c96cfcce4dd9071ec01..77db875b511638b7ff94854c6b1482942da2b3fb 100644
@@ -687,13 +687,15 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+       struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;
 
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
-                                     mark)) {
-               convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
-                                  GFP_NOFS);
+                                     mark, &cached_state)) {
+               convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
+                                  mark, &cached_state, GFP_NOFS);
+               cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
@@ -717,12 +719,14 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+       struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;
 
        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
-                                     EXTENT_NEED_WAIT)) {
-               clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
+                                     EXTENT_NEED_WAIT, &cached_state)) {
+               clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
+                                0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1d7b348443234f912667ee3ee7be3d8a01a81838..f4b9e54b1da2415b74b17485a33143f86d60f7e2 100644
@@ -2463,7 +2463,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 
        while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
-                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
+                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+                               NULL);
                if (ret)
                        break;