X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=fs%2Fbtrfs%2Finode.c;h=4439fbb4ff451bbad4fa03f864ec5ed1ae8625be;hb=50c36504fc6090847f1fbdc7cf4852ae16d6e500;hp=3229c1346ea2dcd9cefb724654e5ea2160cf5a57;hpb=51773bec7ea352f3b9afa11ecfc72324c7977335;p=karo-tx-linux.git

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3229c1346ea2..4439fbb4ff45 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2127,17 +2127,13 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	ins.type = BTRFS_EXTENT_ITEM_KEY;
 	ret = btrfs_alloc_reserved_file_extent(trans, root,
 					root->root_key.objectid,
-					btrfs_ino(inode), file_pos, &ins);
-	if (ret < 0)
-		goto out;
+					btrfs_ino(inode), file_pos,
+					ram_bytes, &ins);
 	/*
-	 * Release the reserved range from inode dirty range map, and
-	 * move it to delayed ref codes, as now accounting only happens at
-	 * commit_transaction() time.
+	 * Release the reserved range from inode dirty range map, as it is
+	 * already moved into delayed_ref_head
 	 */
 	btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
-	ret = btrfs_add_delayed_qgroup_reserve(root->fs_info, trans,
-			root->objectid, disk_bytenr, ram_bytes);
 out:
 	btrfs_free_path(path);
 
@@ -2595,7 +2591,7 @@ again:
 	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
 			new->disk_len, 0,
 			backref->root_id, backref->inum,
-			new->file_pos, 0);	/* start - extent_offset */
+			new->file_pos);	/* start - extent_offset */
 	if (ret) {
 		btrfs_abort_transaction(trans, root, ret);
 		goto out_free_path;
@@ -4541,7 +4537,7 @@ delete:
 		ret = btrfs_free_extent(trans, root, extent_start,
 					extent_num_bytes, 0,
 					btrfs_header_owner(leaf),
-					ino, extent_offset, 0);
+					ino, extent_offset);
 		BUG_ON(ret);
 		if (btrfs_should_throttle_delayed_refs(trans, root))
 			btrfs_async_run_delayed_refs(root,
@@ -9108,6 +9104,7 @@ void btrfs_destroy_inode(struct inode *inode)
 			btrfs_put_ordered_extent(ordered);
 		}
 	}
+	btrfs_qgroup_check_reserved_leak(inode);
 	inode_tree_del(inode);
 	btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
@@ -9744,6 +9741,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 	u64 cur_offset = start;
 	u64 i_size;
 	u64 cur_bytes;
+	u64 last_alloc = (u64)-1;
 	int ret = 0;
 	bool own_trans = true;
 
@@ -9760,6 +9758,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 
 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
 		cur_bytes = max(cur_bytes, min_size);
+		/*
+		 * If we are severely fragmented we could end up with really
+		 * small allocations, so if the allocator is returning small
+		 * chunks lets make its job easier by only searching for those
+		 * sized chunks.
+		 */
+		cur_bytes = min(cur_bytes, last_alloc);
 		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
 					   *alloc_hint, &ins, 1, 0);
 		if (ret) {
@@ -9768,6 +9773,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 			break;
 		}
 
+		last_alloc = ins.offset;
 		ret = insert_reserved_file_extent(trans, inode, cur_offset,
 						  ins.objectid, ins.offset,
 						  ins.offset,