Btrfs: cut down on loops through the allocator
author     Josef Bacik <jbacik@fb.com>
           Thu, 1 Oct 2015 18:54:10 +0000 (14:54 -0400)
committer  Chris Mason <clm@fb.com>
           Thu, 22 Oct 2015 01:55:37 +0000 (18:55 -0700)
We try really, really hard to make allocations, but sometimes it is just
not going to happen, especially when free space is extremely fragmented.
So add a few shortcuts through the looping states.  For example, if we
couldn't allocate a chunk, just go straight to the NO_EMPTY_SIZE loop.
If there are no uncached block groups and we've done a full search, go
straight to the ALLOC_CHUNK stage.  And finally, if we already have
empty_size and empty_cluster set to 0, go ahead and return -ENOSPC.
Thanks,

Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
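
Before the diff itself, here is a minimal userspace sketch of the retry
logic at the bottom of find_free_extent() as it behaves after this patch.
The enum mirrors btrfs_loop_type from fs/btrfs/extent-tree.c, but
advance_loop() and try_alloc_chunk() are hypothetical stand-ins for the
open-coded transition logic and do_chunk_alloc(); they are not kernel
APIs.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors enum btrfs_loop_type in fs/btrfs/extent-tree.c. */
enum loop_type {
	LOOP_CACHING_NOWAIT,
	LOOP_CACHING_WAIT,
	LOOP_ALLOC_CHUNK,
	LOOP_NO_EMPTY_SIZE,
};

/* Hypothetical stub standing in for do_chunk_alloc(). */
static int try_alloc_chunk(void)
{
	return -ENOSPC;
}

/*
 * Hypothetical distillation of the retry logic: advance *loop, relax
 * *empty_size / *empty_cluster, and return -ENOSPC once another pass
 * over the block groups cannot possibly see more free space.
 */
static int advance_loop(int *loop, bool have_caching_bg, bool full_search,
			unsigned long long *empty_size,
			unsigned long long *empty_cluster)
{
	if (*loop == LOOP_CACHING_NOWAIT) {
		/*
		 * Shortcut: no uncached block groups and a full search
		 * behind us means LOOP_CACHING_WAIT can't find anything
		 * new, so go straight to allocating a chunk.
		 */
		if (have_caching_bg || !full_search)
			*loop = LOOP_CACHING_WAIT;
		else
			*loop = LOOP_ALLOC_CHUNK;
	} else {
		(*loop)++;
	}

	/* Shortcut: a chunk we failed to allocate will never appear. */
	if (*loop == LOOP_ALLOC_CHUNK && try_alloc_chunk() == -ENOSPC)
		*loop = LOOP_NO_EMPTY_SIZE;

	if (*loop == LOOP_NO_EMPTY_SIZE) {
		/*
		 * Shortcut: with both knobs already zero, a retry would
		 * scan exactly the same free space; fail fast.
		 */
		if (*empty_size == 0 && *empty_cluster == 0)
			return -ENOSPC;
		*empty_size = 0;
		*empty_cluster = 0;
	}

	return 0;
}

int main(void)
{
	int loop = LOOP_CACHING_NOWAIT;
	unsigned long long empty_size = 0, empty_cluster = 0;
	int retries = 0;

	/* Everything cached, full search done, chunk allocation failing. */
	while (advance_loop(&loop, false, true,
			    &empty_size, &empty_cluster) == 0)
		retries++;

	/* Prints 0: the patched logic gives up without extra passes. */
	printf("extra search passes before -ENOSPC: %d\n", retries);
	return 0;
}

In this worst case (every block group cached, a full search done, and
chunk allocation failing) the patched logic reaches -ENOSPC in a single
transition instead of stepping through every remaining loop state.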
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3185c457f025a9fae6dbc2983e5ff2858935894c..9f18eb0e86b66f8e89d9bf4bf6a725f67572accc 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6921,6 +6921,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        bool failed_alloc = false;
        bool use_cluster = true;
        bool have_caching_bg = false;
+       bool full_search = false;
 
        WARN_ON(num_bytes < root->sectorsize);
        ins->type = BTRFS_EXTENT_ITEM_KEY;
@@ -7023,6 +7024,8 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
        }
 search:
        have_caching_bg = false;
+       if (index == 0 || index == __get_raid_index(flags))
+               full_search = true;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
                            list) {
@@ -7056,6 +7059,7 @@ search:
 have_block_group:
                cached = block_group_cache_done(block_group);
                if (unlikely(!cached)) {
+                       have_caching_bg = true;
                        ret = cache_block_group(block_group, 0);
                        BUG_ON(ret < 0);
                        ret = 0;
@@ -7228,8 +7232,6 @@ unclustered_alloc:
                        failed_alloc = true;
                        goto have_block_group;
                } else if (!offset) {
-                       if (!cached)
-                               have_caching_bg = true;
                        goto loop;
                }
 checks:
@@ -7286,7 +7288,20 @@ loop:
         */
        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
                index = 0;
-               loop++;
+               if (loop == LOOP_CACHING_NOWAIT) {
+                       /*
+                        * We want to skip the LOOP_CACHING_WAIT step if we
+                        * don't have any uncached bgs and we've already done a
+                        * full search through.
+                        */
+                       if (have_caching_bg || !full_search)
+                               loop = LOOP_CACHING_WAIT;
+                       else
+                               loop = LOOP_ALLOC_CHUNK;
+               } else {
+                       loop++;
+               }
+
                if (loop == LOOP_ALLOC_CHUNK) {
                        struct btrfs_trans_handle *trans;
                        int exist = 0;
@@ -7304,6 +7319,15 @@ loop:
 
                        ret = do_chunk_alloc(trans, root, flags,
                                             CHUNK_ALLOC_FORCE);
+
+                       /*
+                        * If we can't allocate a new chunk and we've already
+                        * looped through at least once, move on to the
+                        * NO_EMPTY_SIZE case.
+                        */
+                       if (ret == -ENOSPC)
+                               loop = LOOP_NO_EMPTY_SIZE;
+
                        /*
                         * Do not bail out on ENOSPC since we
                         * can do more things.
@@ -7320,6 +7344,15 @@ loop:
                }
 
                if (loop == LOOP_NO_EMPTY_SIZE) {
+                       /*
+                        * Don't loop again if we already have no empty_size and
+                        * no empty_cluster.
+                        */
+                       if (empty_size == 0 &&
+                           empty_cluster == 0) {
+                               ret = -ENOSPC;
+                               goto out;
+                       }
                        empty_size = 0;
                        empty_cluster = 0;
                }