Btrfs: don't allocate chunks as aggressively
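In short, this change throttles chunk allocation (a space info larger than 256MB is not grown until it is at least ~30% used), starts tracking disk_total and counting bytes_may_use in the space accounting, and gives shrink_delalloc() a sync flag plus a bounded retry loop that watches space_info->bytes_reserved instead of the delalloc block reserve. A minimal user-space sketch of the new should_alloc_chunk() heuristic follows; div_factor() is reimplemented here to match the btrfs helper of the same name (it scales by tenths), the struct only mirrors the fields the check reads, and the function's leading guard is reproduced only approximately:

    #include <stdint.h>

    typedef uint64_t u64;

    /* mirrors btrfs' div_factor(): num scaled by factor/10 */
    static u64 div_factor(u64 num, int factor)
    {
            if (factor == 10)
                    return num;
            return num * factor / 10;
    }

    /* only the fields the heuristic looks at */
    struct space_info_sketch {
            u64 total_bytes;
            u64 bytes_readonly;
            u64 bytes_used;
            u64 bytes_reserved;
    };

    static int should_alloc_chunk_sketch(struct space_info_sketch *sinfo,
                                         u64 alloc_bytes)
    {
            u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;

            /* the pending allocation still fits comfortably: no new chunk */
            if (sinfo->bytes_used + sinfo->bytes_reserved +
                alloc_bytes < div_factor(num_bytes, 8))
                    return 0;

            /*
             * The rule this patch adds: past 256MB, refuse to grow the
             * space until it is at least ~30% used.
             */
            if (num_bytes > 256 * 1024 * 1024 &&
                sinfo->bytes_used < div_factor(num_bytes, 3))
                    return 0;

            return 1;
    }

For example, with 1GB total, 200MB used and a 700MB request, the first guard passes (900MB >= 80% of 1GB) but the new check still refuses to add a chunk, since only ~20% of the existing space is in use.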
index b9080d71991a35cea63d2620d1534234cde6e0a9..aca3314ef8b9b7c48e62fcc6495c970ca54e218f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2763,6 +2763,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        if (found) {
                spin_lock(&found->lock);
                found->total_bytes += total_bytes;
+               found->disk_total += total_bytes * factor;
                found->bytes_used += bytes_used;
                found->disk_used += bytes_used * factor;
                found->full = 0;
@@ -2782,6 +2783,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                                BTRFS_BLOCK_GROUP_SYSTEM |
                                BTRFS_BLOCK_GROUP_METADATA);
        found->total_bytes = total_bytes;
+       found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
        found->disk_used = bytes_used * factor;
        found->bytes_pinned = 0;
@@ -2998,8 +3000,7 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
        rcu_read_unlock();
 }
 
-static int should_alloc_chunk(struct btrfs_space_info *sinfo,
-                             u64 alloc_bytes)
+static int should_alloc_chunk(struct btrfs_space_info *sinfo, u64 alloc_bytes)
 {
        u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
 
@@ -3011,6 +3012,10 @@ static int should_alloc_chunk(struct btrfs_space_info *sinfo,
            alloc_bytes < div_factor(num_bytes, 8))
                return 0;
 
+       if (num_bytes > 256 * 1024 * 1024 &&
+           sinfo->bytes_used < div_factor(num_bytes, 3))
+               return 0;
+
        return 1;
 }
 
@@ -3109,19 +3114,22 @@ static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
  * shrink metadata reservation for delalloc
  */
 static int shrink_delalloc(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, u64 to_reclaim)
+                          struct btrfs_root *root, u64 to_reclaim, int sync)
 {
        struct btrfs_block_rsv *block_rsv;
+       struct btrfs_space_info *space_info;
        u64 reserved;
        u64 max_reclaim;
        u64 reclaimed = 0;
+       int no_reclaim = 0;
        int pause = 1;
        int ret;
 
        block_rsv = &root->fs_info->delalloc_block_rsv;
-       spin_lock(&block_rsv->lock);
-       reserved = block_rsv->reserved;
-       spin_unlock(&block_rsv->lock);
+       space_info = block_rsv->space_info;
+       spin_lock(&space_info->lock);
+       reserved = space_info->bytes_reserved;
+       spin_unlock(&space_info->lock);
 
        if (reserved == 0)
                return 0;
@@ -3129,22 +3137,26 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        max_reclaim = min(reserved, to_reclaim);
 
        while (1) {
-               ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
+               ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0, sync);
                if (!ret) {
+                       if (no_reclaim > 2)
+                               break;
+                       no_reclaim++;
                        __set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(pause);
                        pause <<= 1;
                        if (pause > HZ / 10)
                                pause = HZ / 10;
                } else {
+                       no_reclaim = 0;
                        pause = 1;
                }
 
-               spin_lock(&block_rsv->lock);
-               if (reserved > block_rsv->reserved)
-                       reclaimed = reserved - block_rsv->reserved;
-               reserved = block_rsv->reserved;
-               spin_unlock(&block_rsv->lock);
+               spin_lock(&space_info->lock);
+               if (reserved > space_info->bytes_reserved)
+                       reclaimed += reserved - space_info->bytes_reserved;
+               reserved = space_info->bytes_reserved;
+               spin_unlock(&space_info->lock);
 
                if (reserved == 0 || reclaimed >= max_reclaim)
                        break;
@@ -3173,7 +3185,7 @@ static int should_retry_reserve(struct btrfs_trans_handle *trans,
        if (trans && trans->transaction->in_commit)
                return -ENOSPC;
 
-       ret = shrink_delalloc(trans, root, num_bytes);
+       ret = shrink_delalloc(trans, root, num_bytes, 0);
        if (ret)
                return ret;
 
@@ -3206,7 +3218,8 @@ static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
 
        spin_lock(&space_info->lock);
        unused = space_info->bytes_used + space_info->bytes_reserved +
-                space_info->bytes_pinned + space_info->bytes_readonly;
+                space_info->bytes_pinned + space_info->bytes_readonly +
+                space_info->bytes_may_use;
 
        if (unused < space_info->total_bytes)
                unused = space_info->total_bytes - unused;
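Folding bytes_may_use into the sum means space that earlier, not-yet-consumed reservations may still need no longer looks free to a new reservation. A sketch of the resulting free-space computation, with field names mirroring struct btrfs_space_info and the overflow branch simplified to zero:

    #include <stdint.h>

    typedef uint64_t u64;

    struct space_info_counters {
            u64 total_bytes;
            u64 bytes_used;
            u64 bytes_reserved;
            u64 bytes_pinned;
            u64 bytes_readonly;
            u64 bytes_may_use;
    };

    /* bytes not yet spoken for by any of the counters above */
    static u64 unused_bytes(const struct space_info_counters *s)
    {
            u64 spoken_for = s->bytes_used + s->bytes_reserved +
                             s->bytes_pinned + s->bytes_readonly +
                             s->bytes_may_use;      /* the term this patch adds */

            return spoken_for < s->total_bytes ? s->total_bytes - spoken_for : 0;
    }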
@@ -3500,6 +3513,8 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
 
        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        spin_lock(&sinfo->lock);
+       if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
+               data_used = 0;
        meta_used = sinfo->bytes_used;
        spin_unlock(&sinfo->lock);
 
@@ -3527,7 +3542,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
        block_rsv->size = num_bytes;
 
        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
-                   sinfo->bytes_reserved + sinfo->bytes_readonly;
+                   sinfo->bytes_reserved + sinfo->bytes_readonly +
+                   sinfo->bytes_may_use;
 
        if (sinfo->total_bytes > num_bytes) {
                num_bytes = sinfo->total_bytes - num_bytes;
@@ -3718,7 +3734,7 @@ again:
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        if (block_rsv->size > 512 * 1024 * 1024)
-               shrink_delalloc(NULL, root, to_reserve);
+               shrink_delalloc(NULL, root, to_reserve, 0);
 
        return 0;
 }
@@ -4360,7 +4376,8 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
        block_rsv = get_block_rsv(trans, root);
        cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-       BUG_ON(block_rsv->space_info != cache->space_info);
+       if (block_rsv->space_info != cache->space_info)
+               goto out;
 
        if (btrfs_header_generation(buf) == trans->transid) {
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
@@ -8089,6 +8106,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        struct btrfs_free_cluster *cluster;
        struct btrfs_key key;
        int ret;
+       int factor;
 
        root = root->fs_info->extent_root;
 
@@ -8097,6 +8115,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        BUG_ON(!block_group->ro);
 
        memcpy(&key, &block_group->key, sizeof(key));
+       if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
+                                 BTRFS_BLOCK_GROUP_RAID1 |
+                                 BTRFS_BLOCK_GROUP_RAID10))
+               factor = 2;
+       else
+               factor = 1;
 
        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
@@ -8137,6 +8161,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
+       block_group->space_info->disk_total -= block_group->key.offset * factor;
        spin_unlock(&block_group->space_info->lock);
 
        btrfs_clear_space_info_full(root->fs_info);
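The factor applied here mirrors the one used in update_space_info() when the group was added: for DUP, RAID1 and RAID10 each logical byte occupies two bytes on disk, so removing a block group has to shrink disk_total by twice its size. A compact sketch of that bookkeeping; the flag bit values below are illustrative stand-ins, not the real BTRFS_BLOCK_GROUP_* definitions:

    #include <stdint.h>

    typedef uint64_t u64;

    /* illustrative stand-ins for the BTRFS_BLOCK_GROUP_* profile bits */
    #define BG_RAID1        (1ULL << 4)
    #define BG_DUP          (1ULL << 5)
    #define BG_RAID10       (1ULL << 6)

    /* on-disk bytes consumed per logical byte for this profile */
    static int raid_factor(u64 flags)
    {
            return (flags & (BG_DUP | BG_RAID1 | BG_RAID10)) ? 2 : 1;
    }

    /* mirrors what btrfs_remove_block_group() now does to the space info */
    static void remove_block_group_sketch(u64 *total_bytes, u64 *disk_total,
                                          u64 group_size, u64 group_flags)
    {
            *total_bytes -= group_size;
            *disk_total  -= group_size * raid_factor(group_flags);
    }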