2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
32 #include "print-tree.h"
33 #include "transaction.h"
37 #include "free-space-cache.h"
40 #undef SCRAMBLE_DELAYED_REFS
43 * control flags for do_chunk_alloc's force field
44 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45 * if we really need one.
47 * CHUNK_ALLOC_LIMITED means to only try to allocate one
48 * if we have very few chunks already allocated. This is
49 * used as part of the clustering code to help make sure
50 * we have a good pool of storage to cluster in, without
51 * filling the FS with empty chunks.
53 * CHUNK_ALLOC_FORCE means it must try to allocate one
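 *
 * (Illustrative summary, not part of the original comment: callers that only
 * want a healthier pool of chunks to cluster in ask for CHUNK_ALLOC_LIMITED,
 * while callers that have already decided a new chunk is required pass
 * CHUNK_ALLOC_FORCE, which asks do_chunk_alloc to attempt the allocation
 * regardless of the usual "do we really need one?" checks.)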
57 CHUNK_ALLOC_NO_FORCE = 0,
58 CHUNK_ALLOC_LIMITED = 1,
59 CHUNK_ALLOC_FORCE = 2,
63 * Control how reservations are dealt with.
65 * RESERVE_FREE - freeing a reservation.
66 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for ENOSPC accounting.
68 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69 * bytes_may_use as the ENOSPC accounting is done elsewhere
74 RESERVE_ALLOC_NO_ACCOUNT = 2,
77 static int update_block_group(struct btrfs_root *root,
78 u64 bytenr, u64 num_bytes, int alloc);
79 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
80 struct btrfs_root *root,
81 u64 bytenr, u64 num_bytes, u64 parent,
82 u64 root_objectid, u64 owner_objectid,
83 u64 owner_offset, int refs_to_drop,
84 struct btrfs_delayed_extent_op *extra_op);
85 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
86 struct extent_buffer *leaf,
87 struct btrfs_extent_item *ei);
88 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
89 struct btrfs_root *root,
90 u64 parent, u64 root_objectid,
91 u64 flags, u64 owner, u64 offset,
92 struct btrfs_key *ins, int ref_mod);
93 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
94 struct btrfs_root *root,
95 u64 parent, u64 root_objectid,
96 u64 flags, struct btrfs_disk_key *key,
97 int level, struct btrfs_key *ins);
98 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
99 struct btrfs_root *extent_root, u64 flags,
101 static int find_next_key(struct btrfs_path *path, int level,
102 struct btrfs_key *key);
103 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
104 int dump_block_groups);
105 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
106 u64 num_bytes, int reserve);
107 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
109 int btrfs_pin_extent(struct btrfs_root *root,
110 u64 bytenr, u64 num_bytes, int reserved);
113 block_group_cache_done(struct btrfs_block_group_cache *cache)
116 return cache->cached == BTRFS_CACHE_FINISHED ||
117 cache->cached == BTRFS_CACHE_ERROR;
120 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
122 return (cache->flags & bits) == bits;
125 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
127 atomic_inc(&cache->count);
130 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
132 if (atomic_dec_and_test(&cache->count)) {
133 WARN_ON(cache->pinned > 0);
134 WARN_ON(cache->reserved > 0);
135 kfree(cache->free_space_ctl);
141 * this adds the block group to the fs_info rb tree for the block group cache.
144 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
145 struct btrfs_block_group_cache *block_group)
148 struct rb_node *parent = NULL;
149 struct btrfs_block_group_cache *cache;
151 spin_lock(&info->block_group_cache_lock);
152 p = &info->block_group_cache_tree.rb_node;
156 cache = rb_entry(parent, struct btrfs_block_group_cache,
158 if (block_group->key.objectid < cache->key.objectid) {
160 } else if (block_group->key.objectid > cache->key.objectid) {
163 spin_unlock(&info->block_group_cache_lock);
168 rb_link_node(&block_group->cache_node, parent, p);
169 rb_insert_color(&block_group->cache_node,
170 &info->block_group_cache_tree);
172 if (info->first_logical_byte > block_group->key.objectid)
173 info->first_logical_byte = block_group->key.objectid;
175 spin_unlock(&info->block_group_cache_lock);
181 * This will return the block group at or after bytenr if contains is 0, else
182 * it will return the block group that contains the bytenr
184 static struct btrfs_block_group_cache *
185 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
188 struct btrfs_block_group_cache *cache, *ret = NULL;
192 spin_lock(&info->block_group_cache_lock);
193 n = info->block_group_cache_tree.rb_node;
196 cache = rb_entry(n, struct btrfs_block_group_cache,
198 end = cache->key.objectid + cache->key.offset - 1;
199 start = cache->key.objectid;
201 if (bytenr < start) {
202 if (!contains && (!ret || start < ret->key.objectid))
205 } else if (bytenr > start) {
206 if (contains && bytenr <= end) {
217 btrfs_get_block_group(ret);
218 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
219 info->first_logical_byte = ret->key.objectid;
221 spin_unlock(&info->block_group_cache_lock);
226 static int add_excluded_extent(struct btrfs_root *root,
227 u64 start, u64 num_bytes)
229 u64 end = start + num_bytes - 1;
230 set_extent_bits(&root->fs_info->freed_extents[0],
231 start, end, EXTENT_UPTODATE, GFP_NOFS);
232 set_extent_bits(&root->fs_info->freed_extents[1],
233 start, end, EXTENT_UPTODATE, GFP_NOFS);
237 static void free_excluded_extents(struct btrfs_root *root,
238 struct btrfs_block_group_cache *cache)
242 start = cache->key.objectid;
243 end = start + cache->key.offset - 1;
245 clear_extent_bits(&root->fs_info->freed_extents[0],
246 start, end, EXTENT_UPTODATE, GFP_NOFS);
247 clear_extent_bits(&root->fs_info->freed_extents[1],
248 start, end, EXTENT_UPTODATE, GFP_NOFS);
251 static int exclude_super_stripes(struct btrfs_root *root,
252 struct btrfs_block_group_cache *cache)
259 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
260 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
261 cache->bytes_super += stripe_len;
262 ret = add_excluded_extent(root, cache->key.objectid,
268 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
269 bytenr = btrfs_sb_offset(i);
270 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
271 cache->key.objectid, bytenr,
272 0, &logical, &nr, &stripe_len);
279 if (logical[nr] > cache->key.objectid +
283 if (logical[nr] + stripe_len <= cache->key.objectid)
287 if (start < cache->key.objectid) {
288 start = cache->key.objectid;
289 len = (logical[nr] + stripe_len) - start;
291 len = min_t(u64, stripe_len,
292 cache->key.objectid +
293 cache->key.offset - start);
296 cache->bytes_super += len;
297 ret = add_excluded_extent(root, start, len);
309 static struct btrfs_caching_control *
310 get_caching_control(struct btrfs_block_group_cache *cache)
312 struct btrfs_caching_control *ctl;
314 spin_lock(&cache->lock);
315 if (cache->cached != BTRFS_CACHE_STARTED) {
316 spin_unlock(&cache->lock);
320 /* We're loading it the fast way, so we don't have a caching_ctl. */
321 if (!cache->caching_ctl) {
322 spin_unlock(&cache->lock);
326 ctl = cache->caching_ctl;
327 atomic_inc(&ctl->count);
328 spin_unlock(&cache->lock);
332 static void put_caching_control(struct btrfs_caching_control *ctl)
334 if (atomic_dec_and_test(&ctl->count))
339 * This is only called by cache_block_group. Since we could have freed extents,
340 * we need to check the pinned_extents for any extents that can't be used yet,
341 * since their free space will only be released when the transaction commits.
343 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
344 struct btrfs_fs_info *info, u64 start, u64 end)
346 u64 extent_start, extent_end, size, total_added = 0;
349 while (start < end) {
350 ret = find_first_extent_bit(info->pinned_extents, start,
351 &extent_start, &extent_end,
352 EXTENT_DIRTY | EXTENT_UPTODATE,
357 if (extent_start <= start) {
358 start = extent_end + 1;
359 } else if (extent_start > start && extent_start < end) {
360 size = extent_start - start;
362 ret = btrfs_add_free_space(block_group, start,
364 BUG_ON(ret); /* -ENOMEM or logic error */
365 start = extent_end + 1;
374 ret = btrfs_add_free_space(block_group, start, size);
375 BUG_ON(ret); /* -ENOMEM or logic error */
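/*
 * Worked example (illustrative): caching the range [0, 100) while
 * pinned_extents holds [20, 29] and [60, 69] adds [0, 20) and [30, 60)
 * inside the loop and [70, 100) in the final btrfs_add_free_space() call
 * above; the pinned ranges themselves only become free space once the
 * transaction that pinned them commits.
 */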
381 static noinline void caching_thread(struct btrfs_work *work)
383 struct btrfs_block_group_cache *block_group;
384 struct btrfs_fs_info *fs_info;
385 struct btrfs_caching_control *caching_ctl;
386 struct btrfs_root *extent_root;
387 struct btrfs_path *path;
388 struct extent_buffer *leaf;
389 struct btrfs_key key;
395 caching_ctl = container_of(work, struct btrfs_caching_control, work);
396 block_group = caching_ctl->block_group;
397 fs_info = block_group->fs_info;
398 extent_root = fs_info->extent_root;
400 path = btrfs_alloc_path();
404 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
407 * We don't want to deadlock with somebody trying to allocate a new
408 * extent for the extent root while also trying to search the extent
409 * root to add free space. So we skip locking and search the commit
410 * root, since it's read-only.
412 path->skip_locking = 1;
413 path->search_commit_root = 1;
418 key.type = BTRFS_EXTENT_ITEM_KEY;
420 mutex_lock(&caching_ctl->mutex);
421 /* need to make sure the commit_root doesn't disappear */
422 down_read(&fs_info->extent_commit_sem);
425 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
429 leaf = path->nodes[0];
430 nritems = btrfs_header_nritems(leaf);
433 if (btrfs_fs_closing(fs_info) > 1) {
438 if (path->slots[0] < nritems) {
439 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
441 ret = find_next_key(path, 0, &key);
445 if (need_resched()) {
446 caching_ctl->progress = last;
447 btrfs_release_path(path);
448 up_read(&fs_info->extent_commit_sem);
449 mutex_unlock(&caching_ctl->mutex);
454 ret = btrfs_next_leaf(extent_root, path);
459 leaf = path->nodes[0];
460 nritems = btrfs_header_nritems(leaf);
464 if (key.objectid < last) {
467 key.type = BTRFS_EXTENT_ITEM_KEY;
469 caching_ctl->progress = last;
470 btrfs_release_path(path);
474 if (key.objectid < block_group->key.objectid) {
479 if (key.objectid >= block_group->key.objectid +
480 block_group->key.offset)
483 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
484 key.type == BTRFS_METADATA_ITEM_KEY) {
485 total_found += add_new_free_space(block_group,
488 if (key.type == BTRFS_METADATA_ITEM_KEY)
489 last = key.objectid +
490 fs_info->tree_root->leafsize;
492 last = key.objectid + key.offset;
494 if (total_found > (1024 * 1024 * 2)) {
496 wake_up(&caching_ctl->wait);
503 total_found += add_new_free_space(block_group, fs_info, last,
504 block_group->key.objectid +
505 block_group->key.offset);
506 caching_ctl->progress = (u64)-1;
508 spin_lock(&block_group->lock);
509 block_group->caching_ctl = NULL;
510 block_group->cached = BTRFS_CACHE_FINISHED;
511 spin_unlock(&block_group->lock);
514 btrfs_free_path(path);
515 up_read(&fs_info->extent_commit_sem);
517 free_excluded_extents(extent_root, block_group);
519 mutex_unlock(&caching_ctl->mutex);
522 spin_lock(&block_group->lock);
523 block_group->caching_ctl = NULL;
524 block_group->cached = BTRFS_CACHE_ERROR;
525 spin_unlock(&block_group->lock);
527 wake_up(&caching_ctl->wait);
529 put_caching_control(caching_ctl);
530 btrfs_put_block_group(block_group);
533 static int cache_block_group(struct btrfs_block_group_cache *cache,
537 struct btrfs_fs_info *fs_info = cache->fs_info;
538 struct btrfs_caching_control *caching_ctl;
541 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
545 INIT_LIST_HEAD(&caching_ctl->list);
546 mutex_init(&caching_ctl->mutex);
547 init_waitqueue_head(&caching_ctl->wait);
548 caching_ctl->block_group = cache;
549 caching_ctl->progress = cache->key.objectid;
550 atomic_set(&caching_ctl->count, 1);
551 caching_ctl->work.func = caching_thread;
553 spin_lock(&cache->lock);
555 * This should be a rare occasion, but this could happen I think in the
556 * case where one thread starts to load the space cache info, and then
557 * some other thread starts a transaction commit which tries to do an
558 * allocation while the other thread is still loading the space cache
559 * info. The previous loop should have kept us from choosing this block
560 * group, but if we've moved to the state where we will wait on caching
561 * block groups we need to first check if we're doing a fast load here,
562 * so we can wait for it to finish; otherwise we could end up allocating
563 * from a block group whose cache gets evicted for one reason or another.
566 while (cache->cached == BTRFS_CACHE_FAST) {
567 struct btrfs_caching_control *ctl;
569 ctl = cache->caching_ctl;
570 atomic_inc(&ctl->count);
571 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
572 spin_unlock(&cache->lock);
576 finish_wait(&ctl->wait, &wait);
577 put_caching_control(ctl);
578 spin_lock(&cache->lock);
581 if (cache->cached != BTRFS_CACHE_NO) {
582 spin_unlock(&cache->lock);
586 WARN_ON(cache->caching_ctl);
587 cache->caching_ctl = caching_ctl;
588 cache->cached = BTRFS_CACHE_FAST;
589 spin_unlock(&cache->lock);
591 if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
592 ret = load_free_space_cache(fs_info, cache);
594 spin_lock(&cache->lock);
596 cache->caching_ctl = NULL;
597 cache->cached = BTRFS_CACHE_FINISHED;
598 cache->last_byte_to_unpin = (u64)-1;
600 if (load_cache_only) {
601 cache->caching_ctl = NULL;
602 cache->cached = BTRFS_CACHE_NO;
604 cache->cached = BTRFS_CACHE_STARTED;
607 spin_unlock(&cache->lock);
608 wake_up(&caching_ctl->wait);
610 put_caching_control(caching_ctl);
611 free_excluded_extents(fs_info->extent_root, cache);
616 * We are not going to do the fast caching; set cached to the
617 * appropriate value and wake up any waiters.
619 spin_lock(&cache->lock);
620 if (load_cache_only) {
621 cache->caching_ctl = NULL;
622 cache->cached = BTRFS_CACHE_NO;
624 cache->cached = BTRFS_CACHE_STARTED;
626 spin_unlock(&cache->lock);
627 wake_up(&caching_ctl->wait);
630 if (load_cache_only) {
631 put_caching_control(caching_ctl);
635 down_write(&fs_info->extent_commit_sem);
636 atomic_inc(&caching_ctl->count);
637 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
638 up_write(&fs_info->extent_commit_sem);
640 btrfs_get_block_group(cache);
642 btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
648 * return the block group that starts at or after bytenr
650 static struct btrfs_block_group_cache *
651 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
653 struct btrfs_block_group_cache *cache;
655 cache = block_group_cache_tree_search(info, bytenr, 0);
661 * return the block group that contains the given bytenr
663 struct btrfs_block_group_cache *btrfs_lookup_block_group(
664 struct btrfs_fs_info *info,
667 struct btrfs_block_group_cache *cache;
669 cache = block_group_cache_tree_search(info, bytenr, 1);
674 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
677 struct list_head *head = &info->space_info;
678 struct btrfs_space_info *found;
680 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
683 list_for_each_entry_rcu(found, head, list) {
684 if (found->flags & flags) {
694 * after adding space to the filesystem, we need to clear the full flags
695 * on all the space infos.
697 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
699 struct list_head *head = &info->space_info;
700 struct btrfs_space_info *found;
703 list_for_each_entry_rcu(found, head, list)
708 /* simple helper to search for an existing extent at a given offset */
709 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
712 struct btrfs_key key;
713 struct btrfs_path *path;
715 path = btrfs_alloc_path();
719 key.objectid = start;
721 key.type = BTRFS_EXTENT_ITEM_KEY;
722 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
725 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
726 if (key.objectid == start &&
727 key.type == BTRFS_METADATA_ITEM_KEY)
730 btrfs_free_path(path);
735 * helper function to look up the reference count and flags of a tree block.
737 * The head node for a delayed ref is used to store the sum of all the
738 * reference count modifications queued up in the rbtree. The head
739 * node may also store the extent flags to set. This way you can check
740 * what the reference count and extent flags will be once all of
741 * the pending delayed refs are processed.
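 *
 * Example (added for clarity): if the extent item on disk records 2 refs and
 * the delayed ref head carries a pending ref_mod of +1, this helper reports
 * *refs == 3, i.e. the count as it will look once the delayed refs are run.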
743 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
744 struct btrfs_root *root, u64 bytenr,
745 u64 offset, int metadata, u64 *refs, u64 *flags)
747 struct btrfs_delayed_ref_head *head;
748 struct btrfs_delayed_ref_root *delayed_refs;
749 struct btrfs_path *path;
750 struct btrfs_extent_item *ei;
751 struct extent_buffer *leaf;
752 struct btrfs_key key;
759 * If we don't have skinny metadata, don't bother doing anything
762 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
763 offset = root->leafsize;
767 path = btrfs_alloc_path();
772 key.objectid = bytenr;
773 key.type = BTRFS_METADATA_ITEM_KEY;
776 key.objectid = bytenr;
777 key.type = BTRFS_EXTENT_ITEM_KEY;
782 path->skip_locking = 1;
783 path->search_commit_root = 1;
786 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
791 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
793 if (path->slots[0]) {
795 btrfs_item_key_to_cpu(path->nodes[0], &key,
797 if (key.objectid == bytenr &&
798 key.type == BTRFS_EXTENT_ITEM_KEY &&
799 key.offset == root->leafsize)
803 key.objectid = bytenr;
804 key.type = BTRFS_EXTENT_ITEM_KEY;
805 key.offset = root->leafsize;
806 btrfs_release_path(path);
812 leaf = path->nodes[0];
813 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
814 if (item_size >= sizeof(*ei)) {
815 ei = btrfs_item_ptr(leaf, path->slots[0],
816 struct btrfs_extent_item);
817 num_refs = btrfs_extent_refs(leaf, ei);
818 extent_flags = btrfs_extent_flags(leaf, ei);
820 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
821 struct btrfs_extent_item_v0 *ei0;
822 BUG_ON(item_size != sizeof(*ei0));
823 ei0 = btrfs_item_ptr(leaf, path->slots[0],
824 struct btrfs_extent_item_v0);
825 num_refs = btrfs_extent_refs_v0(leaf, ei0);
826 /* FIXME: this isn't correct for data */
827 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
832 BUG_ON(num_refs == 0);
842 delayed_refs = &trans->transaction->delayed_refs;
843 spin_lock(&delayed_refs->lock);
844 head = btrfs_find_delayed_ref_head(trans, bytenr);
846 if (!mutex_trylock(&head->mutex)) {
847 atomic_inc(&head->node.refs);
848 spin_unlock(&delayed_refs->lock);
850 btrfs_release_path(path);
853 * Mutex was contended, block until it's released and try
856 mutex_lock(&head->mutex);
857 mutex_unlock(&head->mutex);
858 btrfs_put_delayed_ref(&head->node);
861 if (head->extent_op && head->extent_op->update_flags)
862 extent_flags |= head->extent_op->flags_to_set;
864 BUG_ON(num_refs == 0);
866 num_refs += head->node.ref_mod;
867 mutex_unlock(&head->mutex);
869 spin_unlock(&delayed_refs->lock);
871 WARN_ON(num_refs == 0);
875 *flags = extent_flags;
877 btrfs_free_path(path);
882 * Back reference rules. Back refs have three main goals:
884 * 1) differentiate between all holders of references to an extent so that
885 * when a reference is dropped we can make sure it was a valid reference
886 * before freeing the extent.
888 * 2) Provide enough information to quickly find the holders of an extent
889 * if we notice a given block is corrupted or bad.
891 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
892 * maintenance. This is actually the same as #2, but with a slightly
893 * different use case.
895 * There are two kinds of back refs. Implicit back refs are optimized
896 * for pointers in non-shared tree blocks. For a given pointer in a block,
897 * back refs of this kind provide information about the block's owner tree
898 * and the pointer's key. This information allows us to find the block by
899 * b-tree searching. Full back refs are for pointers in tree blocks not
900 * referenced by their owner trees; the location of the tree block is
901 * recorded in the back ref. Full back refs are actually generic and can be
902 * used in all the cases where implicit back refs are used. Their major
903 * shortcoming is overhead: every time a tree block gets
904 * COWed, we have to update the back ref entries for all pointers in it.
906 * For a newly allocated tree block, we use implicit back refs for
907 * pointers in it. This means most tree-related operations only involve
908 * implicit back refs. For a tree block created in an old transaction, the
909 * only way to drop a reference to it is to COW it. So we can detect the
910 * event that a tree block loses its owner tree's reference and do the
911 * back ref conversion.
913 * When a tree block is COW'd through a tree, there are four cases:
915 * The reference count of the block is one and the tree is the block's
916 * owner tree. Nothing to do in this case.
918 * The reference count of the block is one and the tree is not the
919 * block's owner tree. In this case, full back refs are used for pointers
920 * in the block. Remove these full back refs and add implicit back refs for
921 * every pointer in the new block.
923 * The reference count of the block is greater than one and the tree is
924 * the block's owner tree. In this case, implicit back refs are used for
925 * pointers in the block. Add full back refs for every pointer in the
926 * block and increase the lower level extents' reference counts. The
927 * original implicit back refs are carried over to the new block.
929 * The reference count of the block is greater than one and the tree is
930 * not the block's owner tree. Add implicit back refs for every pointer in
931 * the new block, increase lower level extents' reference count.
933 * Back reference key composition:
935 * The key objectid corresponds to the first byte in the extent.
936 * The key type is used to differentiate between types of back refs.
937 * The key offset has a different meaning for each type of back ref.
940 * File extents can be referenced by:
942 * - multiple snapshots, subvolumes, or different generations in one subvol
943 * - different files inside a single subvolume
944 * - different offsets inside a file (bookend extents in file.c)
946 * The extent ref structure for the implicit back refs has fields for:
948 * - Objectid of the subvolume root
949 * - objectid of the file holding the reference
950 * - original offset in the file
951 * - how many bookend extents
953 * The key offset for the implicit back refs is a hash of the first three fields above.
956 * The extent ref structure for the full back refs has field for:
958 * - number of pointers in the tree leaf
960 * The key offset for the full back refs is the first byte of the tree leaf that holds the extent pointers.
963 * When a file extent is allocated, the implicit back refs are used
964 * and the fields are filled in:
966 * (root_key.objectid, inode objectid, offset in file, 1)
968 * When a file extent is removed by file truncation, we find the
969 * corresponding implicit back refs and check the following fields:
971 * (btrfs_header_owner(leaf), inode objectid, offset in file)
973 * Btree extents can be referenced by:
975 * - Different subvolumes
977 * Both the implicit back refs and the full back refs for tree blocks
978 * only consist of a key. The key offset for the implicit back refs is
979 * the objectid of the block's owner tree. The key offset for the full back
980 * refs is the first byte of the parent block.
982 * When implicit back refs are used, information about the lowest key and
983 * level of the tree block is required. This information is stored in
984 * the tree block info structure.
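 *
 * A concrete, purely illustrative example: a file extent written at offset 0
 * of inode 300 in subvolume tree 257 gets an implicit back ref keyed as
 * (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY, hash(257, 300, 0)), with the
 * extent data ref structure storing (257, 300, 0, count=1). If the extent is
 * instead referenced through a shared tree leaf at bytenr P, the full back
 * ref is keyed (extent bytenr, BTRFS_SHARED_DATA_REF_KEY, P) and stores only
 * a count.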
987 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
988 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
989 struct btrfs_root *root,
990 struct btrfs_path *path,
991 u64 owner, u32 extra_size)
993 struct btrfs_extent_item *item;
994 struct btrfs_extent_item_v0 *ei0;
995 struct btrfs_extent_ref_v0 *ref0;
996 struct btrfs_tree_block_info *bi;
997 struct extent_buffer *leaf;
998 struct btrfs_key key;
999 struct btrfs_key found_key;
1000 u32 new_size = sizeof(*item);
1004 leaf = path->nodes[0];
1005 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1007 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1008 ei0 = btrfs_item_ptr(leaf, path->slots[0],
1009 struct btrfs_extent_item_v0);
1010 refs = btrfs_extent_refs_v0(leaf, ei0);
1012 if (owner == (u64)-1) {
1014 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1015 ret = btrfs_next_leaf(root, path);
1018 BUG_ON(ret > 0); /* Corruption */
1019 leaf = path->nodes[0];
1021 btrfs_item_key_to_cpu(leaf, &found_key,
1023 BUG_ON(key.objectid != found_key.objectid);
1024 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1028 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1029 struct btrfs_extent_ref_v0);
1030 owner = btrfs_ref_objectid_v0(leaf, ref0);
1034 btrfs_release_path(path);
1036 if (owner < BTRFS_FIRST_FREE_OBJECTID)
1037 new_size += sizeof(*bi);
1039 new_size -= sizeof(*ei0);
1040 ret = btrfs_search_slot(trans, root, &key, path,
1041 new_size + extra_size, 1);
1044 BUG_ON(ret); /* Corruption */
1046 btrfs_extend_item(root, path, new_size);
1048 leaf = path->nodes[0];
1049 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1050 btrfs_set_extent_refs(leaf, item, refs);
1051 /* FIXME: get real generation */
1052 btrfs_set_extent_generation(leaf, item, 0);
1053 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1054 btrfs_set_extent_flags(leaf, item,
1055 BTRFS_EXTENT_FLAG_TREE_BLOCK |
1056 BTRFS_BLOCK_FLAG_FULL_BACKREF);
1057 bi = (struct btrfs_tree_block_info *)(item + 1);
1058 /* FIXME: get first key of the block */
1059 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1060 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1062 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1064 btrfs_mark_buffer_dirty(leaf);
1069 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1071 u32 high_crc = ~(u32)0;
1072 u32 low_crc = ~(u32)0;
1075 lenum = cpu_to_le64(root_objectid);
1076 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
1077 lenum = cpu_to_le64(owner);
1078 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1079 lenum = cpu_to_le64(offset);
1080 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
1082 return ((u64)high_crc << 31) ^ (u64)low_crc;
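/*
 * Note (added for clarity): the hash above is what becomes the key offset of
 * BTRFS_EXTENT_DATA_REF_KEY items -- crc32c over the root objectid, shifted
 * up by 31 bits, xored with crc32c accumulated over the owner and offset.
 * lookup_extent_data_ref() and insert_extent_data_ref() below recompute it
 * the same way, so a given (root, owner, offset) triple always maps to the
 * same key.
 */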
1085 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1086 struct btrfs_extent_data_ref *ref)
1088 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1089 btrfs_extent_data_ref_objectid(leaf, ref),
1090 btrfs_extent_data_ref_offset(leaf, ref));
1093 static int match_extent_data_ref(struct extent_buffer *leaf,
1094 struct btrfs_extent_data_ref *ref,
1095 u64 root_objectid, u64 owner, u64 offset)
1097 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1098 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1099 btrfs_extent_data_ref_offset(leaf, ref) != offset)
1104 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1105 struct btrfs_root *root,
1106 struct btrfs_path *path,
1107 u64 bytenr, u64 parent,
1109 u64 owner, u64 offset)
1111 struct btrfs_key key;
1112 struct btrfs_extent_data_ref *ref;
1113 struct extent_buffer *leaf;
1119 key.objectid = bytenr;
1121 key.type = BTRFS_SHARED_DATA_REF_KEY;
1122 key.offset = parent;
1124 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1125 key.offset = hash_extent_data_ref(root_objectid,
1130 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1139 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1140 key.type = BTRFS_EXTENT_REF_V0_KEY;
1141 btrfs_release_path(path);
1142 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1153 leaf = path->nodes[0];
1154 nritems = btrfs_header_nritems(leaf);
1156 if (path->slots[0] >= nritems) {
1157 ret = btrfs_next_leaf(root, path);
1163 leaf = path->nodes[0];
1164 nritems = btrfs_header_nritems(leaf);
1168 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1169 if (key.objectid != bytenr ||
1170 key.type != BTRFS_EXTENT_DATA_REF_KEY)
1173 ref = btrfs_item_ptr(leaf, path->slots[0],
1174 struct btrfs_extent_data_ref);
1176 if (match_extent_data_ref(leaf, ref, root_objectid,
1179 btrfs_release_path(path);
1191 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1192 struct btrfs_root *root,
1193 struct btrfs_path *path,
1194 u64 bytenr, u64 parent,
1195 u64 root_objectid, u64 owner,
1196 u64 offset, int refs_to_add)
1198 struct btrfs_key key;
1199 struct extent_buffer *leaf;
1204 key.objectid = bytenr;
1206 key.type = BTRFS_SHARED_DATA_REF_KEY;
1207 key.offset = parent;
1208 size = sizeof(struct btrfs_shared_data_ref);
1210 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1211 key.offset = hash_extent_data_ref(root_objectid,
1213 size = sizeof(struct btrfs_extent_data_ref);
1216 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1217 if (ret && ret != -EEXIST)
1220 leaf = path->nodes[0];
1222 struct btrfs_shared_data_ref *ref;
1223 ref = btrfs_item_ptr(leaf, path->slots[0],
1224 struct btrfs_shared_data_ref);
1226 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1228 num_refs = btrfs_shared_data_ref_count(leaf, ref);
1229 num_refs += refs_to_add;
1230 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1233 struct btrfs_extent_data_ref *ref;
1234 while (ret == -EEXIST) {
1235 ref = btrfs_item_ptr(leaf, path->slots[0],
1236 struct btrfs_extent_data_ref);
1237 if (match_extent_data_ref(leaf, ref, root_objectid,
1240 btrfs_release_path(path);
1242 ret = btrfs_insert_empty_item(trans, root, path, &key,
1244 if (ret && ret != -EEXIST)
1247 leaf = path->nodes[0];
1249 ref = btrfs_item_ptr(leaf, path->slots[0],
1250 struct btrfs_extent_data_ref);
1252 btrfs_set_extent_data_ref_root(leaf, ref,
1254 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1255 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1256 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1258 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1259 num_refs += refs_to_add;
1260 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1263 btrfs_mark_buffer_dirty(leaf);
1266 btrfs_release_path(path);
1270 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1271 struct btrfs_root *root,
1272 struct btrfs_path *path,
1275 struct btrfs_key key;
1276 struct btrfs_extent_data_ref *ref1 = NULL;
1277 struct btrfs_shared_data_ref *ref2 = NULL;
1278 struct extent_buffer *leaf;
1282 leaf = path->nodes[0];
1283 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1285 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1286 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1287 struct btrfs_extent_data_ref);
1288 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1289 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1290 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1291 struct btrfs_shared_data_ref);
1292 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1294 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1295 struct btrfs_extent_ref_v0 *ref0;
1296 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1297 struct btrfs_extent_ref_v0);
1298 num_refs = btrfs_ref_count_v0(leaf, ref0);
1304 BUG_ON(num_refs < refs_to_drop);
1305 num_refs -= refs_to_drop;
1307 if (num_refs == 0) {
1308 ret = btrfs_del_item(trans, root, path);
1310 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1311 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1312 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1313 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1314 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1316 struct btrfs_extent_ref_v0 *ref0;
1317 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1318 struct btrfs_extent_ref_v0);
1319 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1322 btrfs_mark_buffer_dirty(leaf);
1327 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1328 struct btrfs_path *path,
1329 struct btrfs_extent_inline_ref *iref)
1331 struct btrfs_key key;
1332 struct extent_buffer *leaf;
1333 struct btrfs_extent_data_ref *ref1;
1334 struct btrfs_shared_data_ref *ref2;
1337 leaf = path->nodes[0];
1338 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1340 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1341 BTRFS_EXTENT_DATA_REF_KEY) {
1342 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1343 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1345 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1346 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1348 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1349 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1350 struct btrfs_extent_data_ref);
1351 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1352 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1353 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1354 struct btrfs_shared_data_ref);
1355 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1358 struct btrfs_extent_ref_v0 *ref0;
1359 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1360 struct btrfs_extent_ref_v0);
1361 num_refs = btrfs_ref_count_v0(leaf, ref0);
1369 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1370 struct btrfs_root *root,
1371 struct btrfs_path *path,
1372 u64 bytenr, u64 parent,
1375 struct btrfs_key key;
1378 key.objectid = bytenr;
1380 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1381 key.offset = parent;
1383 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1384 key.offset = root_objectid;
1387 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1390 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1391 if (ret == -ENOENT && parent) {
1392 btrfs_release_path(path);
1393 key.type = BTRFS_EXTENT_REF_V0_KEY;
1394 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1402 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1403 struct btrfs_root *root,
1404 struct btrfs_path *path,
1405 u64 bytenr, u64 parent,
1408 struct btrfs_key key;
1411 key.objectid = bytenr;
1413 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1414 key.offset = parent;
1416 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1417 key.offset = root_objectid;
1420 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1421 btrfs_release_path(path);
1425 static inline int extent_ref_type(u64 parent, u64 owner)
1428 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1430 type = BTRFS_SHARED_BLOCK_REF_KEY;
1432 type = BTRFS_TREE_BLOCK_REF_KEY;
1435 type = BTRFS_SHARED_DATA_REF_KEY;
1437 type = BTRFS_EXTENT_DATA_REF_KEY;
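/*
 * Quick reference for the mapping above, derived from the code:
 *
 *   tree block (owner < BTRFS_FIRST_FREE_OBJECTID), parent set
 *                                   -> BTRFS_SHARED_BLOCK_REF_KEY
 *   tree block, no parent           -> BTRFS_TREE_BLOCK_REF_KEY
 *   data extent, parent set         -> BTRFS_SHARED_DATA_REF_KEY
 *   data extent, no parent          -> BTRFS_EXTENT_DATA_REF_KEY
 */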
1442 static int find_next_key(struct btrfs_path *path, int level,
1443 struct btrfs_key *key)
1446 for (; level < BTRFS_MAX_LEVEL; level++) {
1447 if (!path->nodes[level])
1449 if (path->slots[level] + 1 >=
1450 btrfs_header_nritems(path->nodes[level]))
1453 btrfs_item_key_to_cpu(path->nodes[level], key,
1454 path->slots[level] + 1);
1456 btrfs_node_key_to_cpu(path->nodes[level], key,
1457 path->slots[level] + 1);
1464 * look for an inline back ref. If the back ref is found, *ref_ret is set
1465 * to the address of the inline back ref, and 0 is returned.
1467 * If the back ref isn't found, *ref_ret is set to the address where it
1468 * should be inserted, and -ENOENT is returned.
1470 * If insert is true and there are too many inline back refs, the path
1471 * points to the extent item, and -EAGAIN is returned.
1473 * NOTE: inline back refs are ordered in the same way that back ref
1474 * items in the tree are ordered.
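 *
 * (Summary of how callers use this, added for clarity: on 0 the caller updates
 * the existing inline ref in place, on -ENOENT it inserts a new inline ref at
 * *ref_ret, and on -EAGAIN it falls back to a separate keyed back ref item --
 * see insert_inline_extent_backref() and __btrfs_inc_extent_ref() below.)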
1476 static noinline_for_stack
1477 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478 struct btrfs_root *root,
1479 struct btrfs_path *path,
1480 struct btrfs_extent_inline_ref **ref_ret,
1481 u64 bytenr, u64 num_bytes,
1482 u64 parent, u64 root_objectid,
1483 u64 owner, u64 offset, int insert)
1485 struct btrfs_key key;
1486 struct extent_buffer *leaf;
1487 struct btrfs_extent_item *ei;
1488 struct btrfs_extent_inline_ref *iref;
1498 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1501 key.objectid = bytenr;
1502 key.type = BTRFS_EXTENT_ITEM_KEY;
1503 key.offset = num_bytes;
1505 want = extent_ref_type(parent, owner);
1507 extra_size = btrfs_extent_inline_ref_size(want);
1508 path->keep_locks = 1;
1513 * Owner is our parent level, so we can just add one to get the level
1514 * for the block we are interested in.
1516 if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1517 key.type = BTRFS_METADATA_ITEM_KEY;
1522 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1529 * We may be a newly converted file system which still has the old fat
1530 * extent entries for metadata, so try and see if we have one of those.
1532 if (ret > 0 && skinny_metadata) {
1533 skinny_metadata = false;
1534 if (path->slots[0]) {
1536 btrfs_item_key_to_cpu(path->nodes[0], &key,
1538 if (key.objectid == bytenr &&
1539 key.type == BTRFS_EXTENT_ITEM_KEY &&
1540 key.offset == num_bytes)
1544 key.type = BTRFS_EXTENT_ITEM_KEY;
1545 key.offset = num_bytes;
1546 btrfs_release_path(path);
1551 if (ret && !insert) {
1560 leaf = path->nodes[0];
1561 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563 if (item_size < sizeof(*ei)) {
1568 ret = convert_extent_item_v0(trans, root, path, owner,
1574 leaf = path->nodes[0];
1575 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1578 BUG_ON(item_size < sizeof(*ei));
1580 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581 flags = btrfs_extent_flags(leaf, ei);
1583 ptr = (unsigned long)(ei + 1);
1584 end = (unsigned long)ei + item_size;
1586 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587 ptr += sizeof(struct btrfs_tree_block_info);
1597 iref = (struct btrfs_extent_inline_ref *)ptr;
1598 type = btrfs_extent_inline_ref_type(leaf, iref);
1602 ptr += btrfs_extent_inline_ref_size(type);
1606 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607 struct btrfs_extent_data_ref *dref;
1608 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609 if (match_extent_data_ref(leaf, dref, root_objectid,
1614 if (hash_extent_data_ref_item(leaf, dref) <
1615 hash_extent_data_ref(root_objectid, owner, offset))
1619 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1621 if (parent == ref_offset) {
1625 if (ref_offset < parent)
1628 if (root_objectid == ref_offset) {
1632 if (ref_offset < root_objectid)
1636 ptr += btrfs_extent_inline_ref_size(type);
1638 if (err == -ENOENT && insert) {
1639 if (item_size + extra_size >=
1640 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1645 * To add a new inline back ref, we have to make sure
1646 * there is no corresponding back ref item.
1647 * For simplicity, we just do not add a new inline back
1648 * ref if there is any kind of item for this block.
1650 if (find_next_key(path, 0, &key) == 0 &&
1651 key.objectid == bytenr &&
1652 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1657 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1660 path->keep_locks = 0;
1661 btrfs_unlock_up_safe(path, 1);
1667 * helper to add new inline back ref
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671 struct btrfs_path *path,
1672 struct btrfs_extent_inline_ref *iref,
1673 u64 parent, u64 root_objectid,
1674 u64 owner, u64 offset, int refs_to_add,
1675 struct btrfs_delayed_extent_op *extent_op)
1677 struct extent_buffer *leaf;
1678 struct btrfs_extent_item *ei;
1681 unsigned long item_offset;
1686 leaf = path->nodes[0];
1687 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688 item_offset = (unsigned long)iref - (unsigned long)ei;
1690 type = extent_ref_type(parent, owner);
1691 size = btrfs_extent_inline_ref_size(type);
1693 btrfs_extend_item(root, path, size);
1695 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696 refs = btrfs_extent_refs(leaf, ei);
1697 refs += refs_to_add;
1698 btrfs_set_extent_refs(leaf, ei, refs);
1700 __run_delayed_extent_op(extent_op, leaf, ei);
1702 ptr = (unsigned long)ei + item_offset;
1703 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704 if (ptr < end - size)
1705 memmove_extent_buffer(leaf, ptr + size, ptr,
1708 iref = (struct btrfs_extent_inline_ref *)ptr;
1709 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711 struct btrfs_extent_data_ref *dref;
1712 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718 struct btrfs_shared_data_ref *sref;
1719 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1725 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1727 btrfs_mark_buffer_dirty(leaf);
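/*
 * Illustrative layout of the extent item after the insertion above (the
 * non-skinny metadata case):
 *
 *   [btrfs_extent_item][btrfs_tree_block_info, tree blocks only]
 *       [inline ref][inline ref]...
 *
 * btrfs_extend_item() grows the item, the inline refs after the insertion
 * point are shifted up to make room, and the new ref is written into the
 * gap so the inline refs keep their sorted order.
 */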
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731 struct btrfs_root *root,
1732 struct btrfs_path *path,
1733 struct btrfs_extent_inline_ref **ref_ret,
1734 u64 bytenr, u64 num_bytes, u64 parent,
1735 u64 root_objectid, u64 owner, u64 offset)
1739 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740 bytenr, num_bytes, parent,
1741 root_objectid, owner, offset, 0);
1745 btrfs_release_path(path);
1748 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1752 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753 root_objectid, owner, offset);
1759 * helper to update/remove inline back ref
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763 struct btrfs_path *path,
1764 struct btrfs_extent_inline_ref *iref,
1766 struct btrfs_delayed_extent_op *extent_op)
1768 struct extent_buffer *leaf;
1769 struct btrfs_extent_item *ei;
1770 struct btrfs_extent_data_ref *dref = NULL;
1771 struct btrfs_shared_data_ref *sref = NULL;
1779 leaf = path->nodes[0];
1780 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781 refs = btrfs_extent_refs(leaf, ei);
1782 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783 refs += refs_to_mod;
1784 btrfs_set_extent_refs(leaf, ei, refs);
1786 __run_delayed_extent_op(extent_op, leaf, ei);
1788 type = btrfs_extent_inline_ref_type(leaf, iref);
1790 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792 refs = btrfs_extent_data_ref_count(leaf, dref);
1793 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795 refs = btrfs_shared_data_ref_count(leaf, sref);
1798 BUG_ON(refs_to_mod != -1);
1801 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802 refs += refs_to_mod;
1805 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1808 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1810 size = btrfs_extent_inline_ref_size(type);
1811 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812 ptr = (unsigned long)iref;
1813 end = (unsigned long)ei + item_size;
1814 if (ptr + size < end)
1815 memmove_extent_buffer(leaf, ptr, ptr + size,
1818 btrfs_truncate_item(root, path, item_size, 1);
1820 btrfs_mark_buffer_dirty(leaf);
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825 struct btrfs_root *root,
1826 struct btrfs_path *path,
1827 u64 bytenr, u64 num_bytes, u64 parent,
1828 u64 root_objectid, u64 owner,
1829 u64 offset, int refs_to_add,
1830 struct btrfs_delayed_extent_op *extent_op)
1832 struct btrfs_extent_inline_ref *iref;
1835 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836 bytenr, num_bytes, parent,
1837 root_objectid, owner, offset, 1);
1839 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840 update_inline_extent_backref(root, path, iref,
1841 refs_to_add, extent_op);
1842 } else if (ret == -ENOENT) {
1843 setup_inline_extent_backref(root, path, iref, parent,
1844 root_objectid, owner, offset,
1845 refs_to_add, extent_op);
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852 struct btrfs_root *root,
1853 struct btrfs_path *path,
1854 u64 bytenr, u64 parent, u64 root_objectid,
1855 u64 owner, u64 offset, int refs_to_add)
1858 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859 BUG_ON(refs_to_add != 1);
1860 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861 parent, root_objectid);
1863 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864 parent, root_objectid,
1865 owner, offset, refs_to_add);
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871 struct btrfs_root *root,
1872 struct btrfs_path *path,
1873 struct btrfs_extent_inline_ref *iref,
1874 int refs_to_drop, int is_data)
1878 BUG_ON(!is_data && refs_to_drop != 1);
1880 update_inline_extent_backref(root, path, iref,
1881 -refs_to_drop, NULL);
1882 } else if (is_data) {
1883 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1885 ret = btrfs_del_item(trans, root, path);
1890 static int btrfs_issue_discard(struct block_device *bdev,
1893 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
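/*
 * Note (added for clarity): blkdev_issue_discard() works in 512-byte sector
 * units, hence the ">> 9" conversions above. btrfs_discard_extent() below
 * maps the logical range onto the physical stripes with btrfs_map_block()
 * and issues one discard per device that advertises can_discard, tolerating
 * -EOPNOTSUPP from individual devices.
 */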
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897 u64 num_bytes, u64 *actual_bytes)
1900 u64 discarded_bytes = 0;
1901 struct btrfs_bio *bbio = NULL;
1904 /* Tell the block device(s) that the sectors can be discarded */
1905 ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906 bytenr, &num_bytes, &bbio, 0);
1907 /* Error condition is -ENOMEM */
1909 struct btrfs_bio_stripe *stripe = bbio->stripes;
1913 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914 if (!stripe->dev->can_discard)
1917 ret = btrfs_issue_discard(stripe->dev->bdev,
1921 discarded_bytes += stripe->length;
1922 else if (ret != -EOPNOTSUPP)
1923 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1926 * Just in case we get back EOPNOTSUPP for some reason,
1927 * ignore the return value so we don't screw up
1928 * people calling discard_extent.
1936 *actual_bytes = discarded_bytes;
1939 if (ret == -EOPNOTSUPP)
1944 /* Can return -ENOMEM */
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946 struct btrfs_root *root,
1947 u64 bytenr, u64 num_bytes, u64 parent,
1948 u64 root_objectid, u64 owner, u64 offset, int for_cow)
1951 struct btrfs_fs_info *fs_info = root->fs_info;
1953 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954 root_objectid == BTRFS_TREE_LOG_OBJECTID);
1956 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1959 parent, root_objectid, (int)owner,
1960 BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1962 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1964 parent, root_objectid, owner, offset,
1965 BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971 struct btrfs_root *root,
1972 u64 bytenr, u64 num_bytes,
1973 u64 parent, u64 root_objectid,
1974 u64 owner, u64 offset, int refs_to_add,
1975 struct btrfs_delayed_extent_op *extent_op)
1977 struct btrfs_path *path;
1978 struct extent_buffer *leaf;
1979 struct btrfs_extent_item *item;
1984 path = btrfs_alloc_path();
1989 path->leave_spinning = 1;
1990 /* this will set up the path even if it fails to insert the back ref */
1991 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1992 path, bytenr, num_bytes, parent,
1993 root_objectid, owner, offset,
1994 refs_to_add, extent_op);
1998 if (ret != -EAGAIN) {
2003 leaf = path->nodes[0];
2004 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2005 refs = btrfs_extent_refs(leaf, item);
2006 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2008 __run_delayed_extent_op(extent_op, leaf, item);
2010 btrfs_mark_buffer_dirty(leaf);
2011 btrfs_release_path(path);
2014 path->leave_spinning = 1;
2016 /* now insert the actual backref */
2017 ret = insert_extent_backref(trans, root->fs_info->extent_root,
2018 path, bytenr, parent, root_objectid,
2019 owner, offset, refs_to_add);
2021 btrfs_abort_transaction(trans, root, ret);
2023 btrfs_free_path(path);
2027 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2028 struct btrfs_root *root,
2029 struct btrfs_delayed_ref_node *node,
2030 struct btrfs_delayed_extent_op *extent_op,
2031 int insert_reserved)
2034 struct btrfs_delayed_data_ref *ref;
2035 struct btrfs_key ins;
2040 ins.objectid = node->bytenr;
2041 ins.offset = node->num_bytes;
2042 ins.type = BTRFS_EXTENT_ITEM_KEY;
2044 ref = btrfs_delayed_node_to_data_ref(node);
2045 trace_run_delayed_data_ref(node, ref, node->action);
2047 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2048 parent = ref->parent;
2050 ref_root = ref->root;
2052 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2054 flags |= extent_op->flags_to_set;
2055 ret = alloc_reserved_file_extent(trans, root,
2056 parent, ref_root, flags,
2057 ref->objectid, ref->offset,
2058 &ins, node->ref_mod);
2059 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2060 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2061 node->num_bytes, parent,
2062 ref_root, ref->objectid,
2063 ref->offset, node->ref_mod,
2065 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2066 ret = __btrfs_free_extent(trans, root, node->bytenr,
2067 node->num_bytes, parent,
2068 ref_root, ref->objectid,
2069 ref->offset, node->ref_mod,
2077 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2078 struct extent_buffer *leaf,
2079 struct btrfs_extent_item *ei)
2081 u64 flags = btrfs_extent_flags(leaf, ei);
2082 if (extent_op->update_flags) {
2083 flags |= extent_op->flags_to_set;
2084 btrfs_set_extent_flags(leaf, ei, flags);
2087 if (extent_op->update_key) {
2088 struct btrfs_tree_block_info *bi;
2089 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2090 bi = (struct btrfs_tree_block_info *)(ei + 1);
2091 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2095 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2096 struct btrfs_root *root,
2097 struct btrfs_delayed_ref_node *node,
2098 struct btrfs_delayed_extent_op *extent_op)
2100 struct btrfs_key key;
2101 struct btrfs_path *path;
2102 struct btrfs_extent_item *ei;
2103 struct extent_buffer *leaf;
2107 int metadata = !extent_op->is_data;
2112 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2115 path = btrfs_alloc_path();
2119 key.objectid = node->bytenr;
2122 key.type = BTRFS_METADATA_ITEM_KEY;
2123 key.offset = extent_op->level;
2125 key.type = BTRFS_EXTENT_ITEM_KEY;
2126 key.offset = node->num_bytes;
2131 path->leave_spinning = 1;
2132 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2140 btrfs_release_path(path);
2143 key.offset = node->num_bytes;
2144 key.type = BTRFS_EXTENT_ITEM_KEY;
2151 leaf = path->nodes[0];
2152 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2153 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2154 if (item_size < sizeof(*ei)) {
2155 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2161 leaf = path->nodes[0];
2162 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2165 BUG_ON(item_size < sizeof(*ei));
2166 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2167 __run_delayed_extent_op(extent_op, leaf, ei);
2169 btrfs_mark_buffer_dirty(leaf);
2171 btrfs_free_path(path);
2175 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2176 struct btrfs_root *root,
2177 struct btrfs_delayed_ref_node *node,
2178 struct btrfs_delayed_extent_op *extent_op,
2179 int insert_reserved)
2182 struct btrfs_delayed_tree_ref *ref;
2183 struct btrfs_key ins;
2186 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2189 ref = btrfs_delayed_node_to_tree_ref(node);
2190 trace_run_delayed_tree_ref(node, ref, node->action);
2192 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2193 parent = ref->parent;
2195 ref_root = ref->root;
2197 ins.objectid = node->bytenr;
2198 if (skinny_metadata) {
2199 ins.offset = ref->level;
2200 ins.type = BTRFS_METADATA_ITEM_KEY;
2202 ins.offset = node->num_bytes;
2203 ins.type = BTRFS_EXTENT_ITEM_KEY;
2206 BUG_ON(node->ref_mod != 1);
2207 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2208 BUG_ON(!extent_op || !extent_op->update_flags);
2209 ret = alloc_reserved_tree_block(trans, root,
2211 extent_op->flags_to_set,
2214 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2215 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2216 node->num_bytes, parent, ref_root,
2217 ref->level, 0, 1, extent_op);
2218 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2219 ret = __btrfs_free_extent(trans, root, node->bytenr,
2220 node->num_bytes, parent, ref_root,
2221 ref->level, 0, 1, extent_op);
2228 /* helper function to actually process a single delayed ref entry */
2229 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2230 struct btrfs_root *root,
2231 struct btrfs_delayed_ref_node *node,
2232 struct btrfs_delayed_extent_op *extent_op,
2233 int insert_reserved)
2240 if (btrfs_delayed_ref_is_head(node)) {
2241 struct btrfs_delayed_ref_head *head;
2243 * we've hit the end of the chain and we were supposed
2244 * to insert this extent into the tree. But, it got
2245 * deleted before we ever needed to insert it, so all
2246 * we have to do is clean up the accounting
2249 head = btrfs_delayed_node_to_head(node);
2250 trace_run_delayed_ref_head(node, head, node->action);
2252 if (insert_reserved) {
2253 btrfs_pin_extent(root, node->bytenr,
2254 node->num_bytes, 1);
2255 if (head->is_data) {
2256 ret = btrfs_del_csums(trans, root,
2264 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2265 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2266 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2268 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2269 node->type == BTRFS_SHARED_DATA_REF_KEY)
2270 ret = run_delayed_data_ref(trans, root, node, extent_op,
2277 static noinline struct btrfs_delayed_ref_node *
2278 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2280 struct rb_node *node;
2281 struct btrfs_delayed_ref_node *ref;
2282 int action = BTRFS_ADD_DELAYED_REF;
2285 * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2286 * this prevents the ref count from going down to zero while
2287 * there are still pending delayed refs.
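 *
 * (Illustrative: with queued modifications of -1, +1, -1 for one extent,
 * running the +1 first keeps the count from touching zero part way through,
 * which would otherwise look like the extent became free while adds were
 * still pending.)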
2289 node = rb_prev(&head->node.rb_node);
2293 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2295 if (ref->bytenr != head->node.bytenr)
2297 if (ref->action == action)
2299 node = rb_prev(node);
2301 if (action == BTRFS_ADD_DELAYED_REF) {
2302 action = BTRFS_DROP_DELAYED_REF;
2309 * Returns 0 on success or if called with an already aborted transaction.
2310 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2312 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2313 struct btrfs_root *root,
2314 struct list_head *cluster)
2316 struct btrfs_delayed_ref_root *delayed_refs;
2317 struct btrfs_delayed_ref_node *ref;
2318 struct btrfs_delayed_ref_head *locked_ref = NULL;
2319 struct btrfs_delayed_extent_op *extent_op;
2320 struct btrfs_fs_info *fs_info = root->fs_info;
2323 int must_insert_reserved = 0;
2325 delayed_refs = &trans->transaction->delayed_refs;
2328 /* pick a new head ref from the cluster list */
2329 if (list_empty(cluster))
2332 locked_ref = list_entry(cluster->next,
2333 struct btrfs_delayed_ref_head, cluster);
2335 /* grab the lock that says we are going to process
2336 * all the refs for this head */
2337 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2340 * we may have dropped the spin lock to get the head
2341 * mutex lock, and that might have given someone else
2342 * time to free the head. If that's true, it has been
2343 * removed from our list and we can move on.
2345 if (ret == -EAGAIN) {
2353 * We need to try and merge add/drops of the same ref since we
2354 * can run into issues with relocate dropping the implicit ref
2355 * and then it being added back again before the drop can
2356 * finish. If we merged anything we need to re-loop so we can
2359 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2363 * locked_ref is the head node, so we have to go one
2364 * node back for any delayed ref updates
2366 ref = select_delayed_ref(locked_ref);
2368 if (ref && ref->seq &&
2369 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2371 * there are still refs with lower seq numbers in the
2372 * process of being added. Don't run this ref yet.
2374 list_del_init(&locked_ref->cluster);
2375 btrfs_delayed_ref_unlock(locked_ref);
2377 delayed_refs->num_heads_ready++;
2378 spin_unlock(&delayed_refs->lock);
2380 spin_lock(&delayed_refs->lock);
2385 * record the must insert reserved flag before we
2386 * drop the spin lock.
2388 must_insert_reserved = locked_ref->must_insert_reserved;
2389 locked_ref->must_insert_reserved = 0;
2391 extent_op = locked_ref->extent_op;
2392 locked_ref->extent_op = NULL;
2395 /* All delayed refs have been processed. Go ahead
2396 * and send the head node to run_one_delayed_ref,
2397 * so that any accounting fixes can happen
2399 ref = &locked_ref->node;
2401 if (extent_op && must_insert_reserved) {
2402 btrfs_free_delayed_extent_op(extent_op);
2407 spin_unlock(&delayed_refs->lock);
2409 ret = run_delayed_extent_op(trans, root,
2411 btrfs_free_delayed_extent_op(extent_op);
2414 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2415 spin_lock(&delayed_refs->lock);
2416 btrfs_delayed_ref_unlock(locked_ref);
2425 rb_erase(&ref->rb_node, &delayed_refs->root);
2426 delayed_refs->num_entries--;
2427 if (!btrfs_delayed_ref_is_head(ref)) {
2429 * when we play the delayed ref, also correct the
2432 switch (ref->action) {
2433 case BTRFS_ADD_DELAYED_REF:
2434 case BTRFS_ADD_DELAYED_EXTENT:
2435 locked_ref->node.ref_mod -= ref->ref_mod;
2437 case BTRFS_DROP_DELAYED_REF:
2438 locked_ref->node.ref_mod += ref->ref_mod;
2444 list_del_init(&locked_ref->cluster);
2446 spin_unlock(&delayed_refs->lock);
2448 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2449 must_insert_reserved);
2451 btrfs_free_delayed_extent_op(extent_op);
2453 btrfs_delayed_ref_unlock(locked_ref);
2454 btrfs_put_delayed_ref(ref);
2455 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2456 spin_lock(&delayed_refs->lock);
2461 * If this node is a head, that means all the refs in this head
2462 * have been dealt with, and we will pick the next head to deal
2463 * with, so we must unlock the head and drop it from the cluster
2464 * list before we release it.
2466 if (btrfs_delayed_ref_is_head(ref)) {
2467 btrfs_delayed_ref_unlock(locked_ref);
2470 btrfs_put_delayed_ref(ref);
2474 spin_lock(&delayed_refs->lock);
2479 #ifdef SCRAMBLE_DELAYED_REFS
2481 * Normally delayed refs get processed in ascending bytenr order. This
2482 * correlates in most cases to the order added. To expose dependencies on this
2483 * order, we start to process the tree in the middle instead of the beginning
2485 static u64 find_middle(struct rb_root *root)
2487 struct rb_node *n = root->rb_node;
2488 struct btrfs_delayed_ref_node *entry;
2491 u64 first = 0, last = 0;
2495 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2496 first = entry->bytenr;
2500 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2501 last = entry->bytenr;
2506 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2507 WARN_ON(!entry->in_tree);
2509 middle = entry->bytenr;
2522 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2523 struct btrfs_fs_info *fs_info)
2525 struct qgroup_update *qgroup_update;
2528 if (list_empty(&trans->qgroup_ref_list) !=
2529 !trans->delayed_ref_elem.seq) {
2530 /* list without seq or seq without list */
2532 "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2533 list_empty(&trans->qgroup_ref_list) ? "" : " not",
2534 (u32)(trans->delayed_ref_elem.seq >> 32),
2535 (u32)trans->delayed_ref_elem.seq);
2539 if (!trans->delayed_ref_elem.seq)
2542 while (!list_empty(&trans->qgroup_ref_list)) {
2543 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2544 struct qgroup_update, list);
2545 list_del(&qgroup_update->list);
2547 ret = btrfs_qgroup_account_ref(
2548 trans, fs_info, qgroup_update->node,
2549 qgroup_update->extent_op);
2550 kfree(qgroup_update);
2553 btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2558 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2561 int val = atomic_read(&delayed_refs->ref_seq);
2563 if (val < seq || val >= seq + count)
2568 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2572 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2573 sizeof(struct btrfs_extent_inline_ref));
2574 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2575 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2578 * We don't ever fill up leaves all the way so multiply by 2 just to be
2579 * closer to what we're really going to want to use.
2581 return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
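/*
 * Worked example (a sketch with illustrative sizes, not values read from a
 * real tree): with SKINNY_METADATA set, each head costs a few tens of bytes
 * of extent tree space (one extent item plus one inline ref), so even a few
 * thousand heads, doubled for the half-full-leaf assumption above, amount to
 * a modest number of leaves once divided by BTRFS_LEAF_DATA_SIZE(). The
 * helper is meant as an order-of-magnitude estimate, not an exact count.
 */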
2584 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2585 struct btrfs_root *root)
2587 struct btrfs_block_rsv *global_rsv;
2588 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2592 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2593 num_heads = heads_to_leaves(root, num_heads);
2595 num_bytes += (num_heads - 1) * root->leafsize;
2597 global_rsv = &root->fs_info->global_block_rsv;
2600 * If we can't allocate any more chunks let's make sure we have _lots_ of
2601 * wiggle room since running delayed refs can create more delayed refs.
2603 if (global_rsv->space_info->full)
2606 spin_lock(&global_rsv->lock);
2607 if (global_rsv->reserved <= num_bytes)
2609 spin_unlock(&global_rsv->lock);
2614 * this starts processing the delayed reference count updates and
2615 * extent insertions we have queued up so far. count can be
2616 * 0, which means to process everything in the tree at the start
2617 * of the run (but not newly added entries), or it can be some target
2618 * number you'd like to process.
2620 * Returns 0 on success or if called with an aborted transaction
2621 * Returns <0 on error and aborts the transaction
2623 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2624 struct btrfs_root *root, unsigned long count)
2626 struct rb_node *node;
2627 struct btrfs_delayed_ref_root *delayed_refs;
2628 struct btrfs_delayed_ref_node *ref;
2629 struct list_head cluster;
2632 int run_all = count == (unsigned long)-1;
2636 /* We'll clean this up in btrfs_cleanup_transaction */
2640 if (root == root->fs_info->extent_root)
2641 root = root->fs_info->tree_root;
2643 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2645 delayed_refs = &trans->transaction->delayed_refs;
2646 INIT_LIST_HEAD(&cluster);
2648 count = delayed_refs->num_entries * 2;
2652 if (!run_all && !run_most) {
2654 int seq = atomic_read(&delayed_refs->ref_seq);
2657 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2659 DEFINE_WAIT(__wait);
2660 if (delayed_refs->flushing ||
2661 !btrfs_should_throttle_delayed_refs(trans, root))
2664 prepare_to_wait(&delayed_refs->wait, &__wait,
2665 TASK_UNINTERRUPTIBLE);
2667 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2670 finish_wait(&delayed_refs->wait, &__wait);
2672 if (!refs_newer(delayed_refs, seq, 256))
2677 finish_wait(&delayed_refs->wait, &__wait);
2683 atomic_inc(&delayed_refs->procs_running_refs);
2688 spin_lock(&delayed_refs->lock);
2690 #ifdef SCRAMBLE_DELAYED_REFS
2691 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2695 if (!(run_all || run_most) &&
2696 !btrfs_should_throttle_delayed_refs(trans, root))
2700 * go find something we can process in the rbtree. We start at
2701 * the beginning of the tree, and then build a cluster
2702 * of refs to process starting at the first one we are able to
2705 delayed_start = delayed_refs->run_delayed_start;
2706 ret = btrfs_find_ref_cluster(trans, &cluster,
2707 delayed_refs->run_delayed_start);
2711 ret = run_clustered_refs(trans, root, &cluster);
2713 btrfs_release_ref_cluster(&cluster);
2714 spin_unlock(&delayed_refs->lock);
2715 btrfs_abort_transaction(trans, root, ret);
2716 atomic_dec(&delayed_refs->procs_running_refs);
2717 wake_up(&delayed_refs->wait);
2721 atomic_add(ret, &delayed_refs->ref_seq);
2723 count -= min_t(unsigned long, ret, count);
2728 if (delayed_start >= delayed_refs->run_delayed_start) {
2731 * btrfs_find_ref_cluster looped. Let's do one
2732 * more cycle. If we don't run any delayed refs
2733 * during that cycle (because we can't, since
2734 * all of them are blocked), bail out.
2739 * no runnable refs left, stop trying
2746 /* refs were run, let's reset staleness detection */
2752 if (!list_empty(&trans->new_bgs)) {
2753 spin_unlock(&delayed_refs->lock);
2754 btrfs_create_pending_block_groups(trans, root);
2755 spin_lock(&delayed_refs->lock);
2758 node = rb_first(&delayed_refs->root);
2761 count = (unsigned long)-1;
2764 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2766 if (btrfs_delayed_ref_is_head(ref)) {
2767 struct btrfs_delayed_ref_head *head;
2769 head = btrfs_delayed_node_to_head(ref);
2770 atomic_inc(&ref->refs);
2772 spin_unlock(&delayed_refs->lock);
2774 * Mutex was contended, block until it's
2775 * released and try again
2777 mutex_lock(&head->mutex);
2778 mutex_unlock(&head->mutex);
2780 btrfs_put_delayed_ref(ref);
2784 node = rb_next(node);
2786 spin_unlock(&delayed_refs->lock);
2787 schedule_timeout(1);
2791 atomic_dec(&delayed_refs->procs_running_refs);
2793 if (waitqueue_active(&delayed_refs->wait))
2794 wake_up(&delayed_refs->wait);
2796 spin_unlock(&delayed_refs->lock);
2797 assert_qgroups_uptodate(trans);
2801 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2802 struct btrfs_root *root,
2803 u64 bytenr, u64 num_bytes, u64 flags,
2804 int level, int is_data)
2806 struct btrfs_delayed_extent_op *extent_op;
2809 extent_op = btrfs_alloc_delayed_extent_op();
2813 extent_op->flags_to_set = flags;
2814 extent_op->update_flags = 1;
2815 extent_op->update_key = 0;
2816 extent_op->is_data = is_data ? 1 : 0;
2817 extent_op->level = level;
2819 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2820 num_bytes, extent_op);
2822 btrfs_free_delayed_extent_op(extent_op);
2826 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2827 struct btrfs_root *root,
2828 struct btrfs_path *path,
2829 u64 objectid, u64 offset, u64 bytenr)
2831 struct btrfs_delayed_ref_head *head;
2832 struct btrfs_delayed_ref_node *ref;
2833 struct btrfs_delayed_data_ref *data_ref;
2834 struct btrfs_delayed_ref_root *delayed_refs;
2835 struct rb_node *node;
2839 delayed_refs = &trans->transaction->delayed_refs;
2840 spin_lock(&delayed_refs->lock);
2841 head = btrfs_find_delayed_ref_head(trans, bytenr);
2845 if (!mutex_trylock(&head->mutex)) {
2846 atomic_inc(&head->node.refs);
2847 spin_unlock(&delayed_refs->lock);
2849 btrfs_release_path(path);
2852 * Mutex was contended, block until it's released and let
2855 mutex_lock(&head->mutex);
2856 mutex_unlock(&head->mutex);
2857 btrfs_put_delayed_ref(&head->node);
2861 node = rb_prev(&head->node.rb_node);
2865 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2867 if (ref->bytenr != bytenr)
2871 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2874 data_ref = btrfs_delayed_node_to_data_ref(ref);
2876 node = rb_prev(node);
2880 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2881 if (ref->bytenr == bytenr && ref->seq == seq)
2885 if (data_ref->root != root->root_key.objectid ||
2886 data_ref->objectid != objectid || data_ref->offset != offset)
2891 mutex_unlock(&head->mutex);
2893 spin_unlock(&delayed_refs->lock);
2897 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2898 struct btrfs_root *root,
2899 struct btrfs_path *path,
2900 u64 objectid, u64 offset, u64 bytenr)
2902 struct btrfs_root *extent_root = root->fs_info->extent_root;
2903 struct extent_buffer *leaf;
2904 struct btrfs_extent_data_ref *ref;
2905 struct btrfs_extent_inline_ref *iref;
2906 struct btrfs_extent_item *ei;
2907 struct btrfs_key key;
2911 key.objectid = bytenr;
2912 key.offset = (u64)-1;
2913 key.type = BTRFS_EXTENT_ITEM_KEY;
2915 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2918 BUG_ON(ret == 0); /* Corruption */
2921 if (path->slots[0] == 0)
2925 leaf = path->nodes[0];
2926 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2928 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2932 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2933 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2934 if (item_size < sizeof(*ei)) {
2935 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2939 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2941 if (item_size != sizeof(*ei) +
2942 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2945 if (btrfs_extent_generation(leaf, ei) <=
2946 btrfs_root_last_snapshot(&root->root_item))
2949 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2950 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2951 BTRFS_EXTENT_DATA_REF_KEY)
2954 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2955 if (btrfs_extent_refs(leaf, ei) !=
2956 btrfs_extent_data_ref_count(leaf, ref) ||
2957 btrfs_extent_data_ref_root(leaf, ref) !=
2958 root->root_key.objectid ||
2959 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2960 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2968 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2969 struct btrfs_root *root,
2970 u64 objectid, u64 offset, u64 bytenr)
2972 struct btrfs_path *path;
2976 path = btrfs_alloc_path();
2981 ret = check_committed_ref(trans, root, path, objectid,
2983 if (ret && ret != -ENOENT)
2986 ret2 = check_delayed_ref(trans, root, path, objectid,
2988 } while (ret2 == -EAGAIN);
2990 if (ret2 && ret2 != -ENOENT) {
2995 if (ret != -ENOENT || ret2 != -ENOENT)
2998 btrfs_free_path(path);
2999 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3004 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3005 struct btrfs_root *root,
3006 struct extent_buffer *buf,
3007 int full_backref, int inc, int for_cow)
3014 struct btrfs_key key;
3015 struct btrfs_file_extent_item *fi;
3019 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3020 u64, u64, u64, u64, u64, u64, int);
3022 ref_root = btrfs_header_owner(buf);
3023 nritems = btrfs_header_nritems(buf);
3024 level = btrfs_header_level(buf);
3026 if (!root->ref_cows && level == 0)
3030 process_func = btrfs_inc_extent_ref;
3032 process_func = btrfs_free_extent;
3035 parent = buf->start;
3039 for (i = 0; i < nritems; i++) {
3041 btrfs_item_key_to_cpu(buf, &key, i);
3042 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3044 fi = btrfs_item_ptr(buf, i,
3045 struct btrfs_file_extent_item);
3046 if (btrfs_file_extent_type(buf, fi) ==
3047 BTRFS_FILE_EXTENT_INLINE)
3049 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3053 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3054 key.offset -= btrfs_file_extent_offset(buf, fi);
3055 ret = process_func(trans, root, bytenr, num_bytes,
3056 parent, ref_root, key.objectid,
3057 key.offset, for_cow);
3061 bytenr = btrfs_node_blockptr(buf, i);
3062 num_bytes = btrfs_level_size(root, level - 1);
3063 ret = process_func(trans, root, bytenr, num_bytes,
3064 parent, ref_root, level - 1, 0,
3075 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3076 struct extent_buffer *buf, int full_backref, int for_cow)
3078 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3081 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3082 struct extent_buffer *buf, int full_backref, int for_cow)
3084 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
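/*
 * Descriptive note (added for clarity, not in the original): btrfs_inc_ref()
 * and btrfs_dec_ref() are thin wrappers around __btrfs_mod_ref(). For a leaf
 * the walk above visits every EXTENT_DATA item (skipping inline extents) and
 * adjusts the refs of the data extents it points to; for a node it adjusts
 * the refs of every child block. inc == 1 routes through
 * btrfs_inc_extent_ref(), inc == 0 through btrfs_free_extent().
 */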
3087 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3088 struct btrfs_root *root,
3089 struct btrfs_path *path,
3090 struct btrfs_block_group_cache *cache)
3093 struct btrfs_root *extent_root = root->fs_info->extent_root;
3095 struct extent_buffer *leaf;
3097 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3100 BUG_ON(ret); /* Corruption */
3102 leaf = path->nodes[0];
3103 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3104 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3105 btrfs_mark_buffer_dirty(leaf);
3106 btrfs_release_path(path);
3109 btrfs_abort_transaction(trans, root, ret);
3116 static struct btrfs_block_group_cache *
3117 next_block_group(struct btrfs_root *root,
3118 struct btrfs_block_group_cache *cache)
3120 struct rb_node *node;
3121 spin_lock(&root->fs_info->block_group_cache_lock);
3122 node = rb_next(&cache->cache_node);
3123 btrfs_put_block_group(cache);
3125 cache = rb_entry(node, struct btrfs_block_group_cache,
3127 btrfs_get_block_group(cache);
3130 spin_unlock(&root->fs_info->block_group_cache_lock);
3134 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3135 struct btrfs_trans_handle *trans,
3136 struct btrfs_path *path)
3138 struct btrfs_root *root = block_group->fs_info->tree_root;
3139 struct inode *inode = NULL;
3141 int dcs = BTRFS_DC_ERROR;
3147 * If this block group is smaller than 100 megs don't bother caching the
3150 if (block_group->key.offset < (100 * 1024 * 1024)) {
3151 spin_lock(&block_group->lock);
3152 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3153 spin_unlock(&block_group->lock);
3158 inode = lookup_free_space_inode(root, block_group, path);
3159 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3160 ret = PTR_ERR(inode);
3161 btrfs_release_path(path);
3165 if (IS_ERR(inode)) {
3169 if (block_group->ro)
3172 ret = create_free_space_inode(root, trans, block_group, path);
3178 /* We've already set up this transaction, go ahead and exit */
3179 if (block_group->cache_generation == trans->transid &&
3180 i_size_read(inode)) {
3181 dcs = BTRFS_DC_SETUP;
3186 * We want to set the generation to 0, that way if anything goes wrong
3187 * from here on out we know not to trust this cache when we load up next
3190 BTRFS_I(inode)->generation = 0;
3191 ret = btrfs_update_inode(trans, root, inode);
3194 if (i_size_read(inode) > 0) {
3195 ret = btrfs_check_trunc_cache_free_space(root,
3196 &root->fs_info->global_block_rsv);
3200 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3205 spin_lock(&block_group->lock);
3206 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3207 !btrfs_test_opt(root, SPACE_CACHE)) {
3209 * don't bother trying to write stuff out _if_
3210 * a) we're not cached,
3211 * b) we're mounted with the nospace_cache option.
3213 dcs = BTRFS_DC_WRITTEN;
3214 spin_unlock(&block_group->lock);
3217 spin_unlock(&block_group->lock);
3220 * Try to preallocate enough space based on how big the block group is.
3221 * Keep in mind this has to include any pinned space which could end up
3222 * taking up quite a bit since it's not folded into the other space
3225 num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3230 num_pages *= PAGE_CACHE_SIZE;
3232 ret = btrfs_check_data_free_space(inode, num_pages);
3236 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3237 num_pages, num_pages,
3240 dcs = BTRFS_DC_SETUP;
3241 btrfs_free_reserved_data_space(inode, num_pages);
3246 btrfs_release_path(path);
3248 spin_lock(&block_group->lock);
3249 if (!ret && dcs == BTRFS_DC_SETUP)
3250 block_group->cache_generation = trans->transid;
3251 block_group->disk_cache_state = dcs;
3252 spin_unlock(&block_group->lock);
3257 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3258 struct btrfs_root *root)
3260 struct btrfs_block_group_cache *cache;
3262 struct btrfs_path *path;
3265 path = btrfs_alloc_path();
3271 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3273 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3275 cache = next_block_group(root, cache);
3283 err = cache_save_setup(cache, trans, path);
3284 last = cache->key.objectid + cache->key.offset;
3285 btrfs_put_block_group(cache);
3290 err = btrfs_run_delayed_refs(trans, root,
3292 if (err) /* File system offline */
3296 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3298 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3299 btrfs_put_block_group(cache);
3305 cache = next_block_group(root, cache);
3314 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3315 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3317 last = cache->key.objectid + cache->key.offset;
3319 err = write_one_cache_group(trans, root, path, cache);
3320 btrfs_put_block_group(cache);
3321 if (err) /* File system offline */
3327 * I don't think this is needed since we're just marking our
3328 * preallocated extent as written, but just in case it can't
3332 err = btrfs_run_delayed_refs(trans, root,
3334 if (err) /* File system offline */
3338 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3341 * Really this shouldn't happen, but it could if we
3342 * couldn't write the entire preallocated extent and
3343 * splitting the extent resulted in a new block.
3346 btrfs_put_block_group(cache);
3349 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3351 cache = next_block_group(root, cache);
3360 err = btrfs_write_out_cache(root, trans, cache, path);
3363 * If we didn't have an error then the cache state is still
3364 * NEED_WRITE, so we can set it to WRITTEN.
3366 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3367 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3368 last = cache->key.objectid + cache->key.offset;
3369 btrfs_put_block_group(cache);
3373 btrfs_free_path(path);
3377 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3379 struct btrfs_block_group_cache *block_group;
3382 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3383 if (!block_group || block_group->ro)
3386 btrfs_put_block_group(block_group);
3390 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3391 u64 total_bytes, u64 bytes_used,
3392 struct btrfs_space_info **space_info)
3394 struct btrfs_space_info *found;
3399 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3400 BTRFS_BLOCK_GROUP_RAID10))
3405 found = __find_space_info(info, flags);
3407 spin_lock(&found->lock);
3408 found->total_bytes += total_bytes;
3409 found->disk_total += total_bytes * factor;
3410 found->bytes_used += bytes_used;
3411 found->disk_used += bytes_used * factor;
3413 spin_unlock(&found->lock);
3414 *space_info = found;
3417 found = kzalloc(sizeof(*found), GFP_NOFS);
3421 ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3427 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3428 INIT_LIST_HEAD(&found->block_groups[i]);
3429 init_rwsem(&found->groups_sem);
3430 spin_lock_init(&found->lock);
3431 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3432 found->total_bytes = total_bytes;
3433 found->disk_total = total_bytes * factor;
3434 found->bytes_used = bytes_used;
3435 found->disk_used = bytes_used * factor;
3436 found->bytes_pinned = 0;
3437 found->bytes_reserved = 0;
3438 found->bytes_readonly = 0;
3439 found->bytes_may_use = 0;
3441 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3442 found->chunk_alloc = 0;
3444 init_waitqueue_head(&found->wait);
3445 *space_info = found;
3446 list_add_rcu(&found->list, &info->space_info);
3447 if (flags & BTRFS_BLOCK_GROUP_DATA)
3448 info->data_sinfo = found;
3452 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3454 u64 extra_flags = chunk_to_extended(flags) &
3455 BTRFS_EXTENDED_PROFILE_MASK;
3457 write_seqlock(&fs_info->profiles_lock);
3458 if (flags & BTRFS_BLOCK_GROUP_DATA)
3459 fs_info->avail_data_alloc_bits |= extra_flags;
3460 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3461 fs_info->avail_metadata_alloc_bits |= extra_flags;
3462 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3463 fs_info->avail_system_alloc_bits |= extra_flags;
3464 write_sequnlock(&fs_info->profiles_lock);
3468 * returns target flags in extended format or 0 if restripe for this
3469 * chunk_type is not in progress
3471 * should be called with either volume_mutex or balance_lock held
3473 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3475 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3481 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3482 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3483 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3484 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3485 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3486 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3487 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3488 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3489 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3496 * @flags: available profiles in extended format (see ctree.h)
3498 * Returns reduced profile in chunk format. If profile changing is in
3499 * progress (either running or paused) picks the target profile (if it's
3500 * already available), otherwise falls back to plain reducing.
3502 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3505 * we add in the count of missing devices because we want
3506 * to make sure that any RAID levels on a degraded FS
3507 * continue to be honored.
3509 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3510 root->fs_info->fs_devices->missing_devices;
3515 * see if restripe for this chunk_type is in progress, if so
3516 * try to reduce to the target profile
3518 spin_lock(&root->fs_info->balance_lock);
3519 target = get_restripe_target(root->fs_info, flags);
3521 /* pick target profile only if it's already available */
3522 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3523 spin_unlock(&root->fs_info->balance_lock);
3524 return extended_to_chunk(target);
3527 spin_unlock(&root->fs_info->balance_lock);
3529 /* First, mask out the RAID levels which aren't possible */
3530 if (num_devices == 1)
3531 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3532 BTRFS_BLOCK_GROUP_RAID5);
3533 if (num_devices < 3)
3534 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3535 if (num_devices < 4)
3536 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3538 tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3539 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3540 BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3543 if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3544 tmp = BTRFS_BLOCK_GROUP_RAID6;
3545 else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3546 tmp = BTRFS_BLOCK_GROUP_RAID5;
3547 else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3548 tmp = BTRFS_BLOCK_GROUP_RAID10;
3549 else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3550 tmp = BTRFS_BLOCK_GROUP_RAID1;
3551 else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3552 tmp = BTRFS_BLOCK_GROUP_RAID0;
3554 return extended_to_chunk(flags | tmp);
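/*
 * Illustrative example (a sketch, not from the original comments): on a
 * two-device filesystem with both RAID1 and RAID10 set in the incoming
 * flags, the num_devices < 4 check above masks out RAID10, and the priority
 * chain then picks RAID1 as the single profile returned in chunk format.
 */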
3557 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3562 seq = read_seqbegin(&root->fs_info->profiles_lock);
3564 if (flags & BTRFS_BLOCK_GROUP_DATA)
3565 flags |= root->fs_info->avail_data_alloc_bits;
3566 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3567 flags |= root->fs_info->avail_system_alloc_bits;
3568 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3569 flags |= root->fs_info->avail_metadata_alloc_bits;
3570 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3572 return btrfs_reduce_alloc_profile(root, flags);
3575 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3581 flags = BTRFS_BLOCK_GROUP_DATA;
3582 else if (root == root->fs_info->chunk_root)
3583 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3585 flags = BTRFS_BLOCK_GROUP_METADATA;
3587 ret = get_alloc_profile(root, flags);
3592 * This will check the space that the inode allocates from to make sure we have
3593 * enough space for bytes.
3595 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3597 struct btrfs_space_info *data_sinfo;
3598 struct btrfs_root *root = BTRFS_I(inode)->root;
3599 struct btrfs_fs_info *fs_info = root->fs_info;
3601 int ret = 0, committed = 0, alloc_chunk = 1;
3603 /* make sure bytes are sectorsize aligned */
3604 bytes = ALIGN(bytes, root->sectorsize);
3606 if (root == root->fs_info->tree_root ||
3607 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3612 data_sinfo = fs_info->data_sinfo;
3617 /* make sure we have enough space to handle the data first */
3618 spin_lock(&data_sinfo->lock);
3619 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3620 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3621 data_sinfo->bytes_may_use;
3623 if (used + bytes > data_sinfo->total_bytes) {
3624 struct btrfs_trans_handle *trans;
3627 * if we don't have enough free bytes in this space then we need
3628 * to alloc a new chunk.
3630 if (!data_sinfo->full && alloc_chunk) {
3633 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3634 spin_unlock(&data_sinfo->lock);
3636 alloc_target = btrfs_get_alloc_profile(root, 1);
3637 trans = btrfs_join_transaction(root);
3639 return PTR_ERR(trans);
3641 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3643 CHUNK_ALLOC_NO_FORCE);
3644 btrfs_end_transaction(trans, root);
3653 data_sinfo = fs_info->data_sinfo;
3659 * If we don't have enough pinned space to deal with this
3660 * allocation don't bother committing the transaction.
3662 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3665 spin_unlock(&data_sinfo->lock);
3667 /* commit the current transaction and try again */
3670 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3673 trans = btrfs_join_transaction(root);
3675 return PTR_ERR(trans);
3676 ret = btrfs_commit_transaction(trans, root);
3684 data_sinfo->bytes_may_use += bytes;
3685 trace_btrfs_space_reservation(root->fs_info, "space_info",
3686 data_sinfo->flags, bytes, 1);
3687 spin_unlock(&data_sinfo->lock);
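/*
 * Hypothetical caller sketch (illustrative only; do_data_work() is a
 * placeholder, not a btrfs function): the bytes_may_use reservation taken
 * here is expected to be handed back with btrfs_free_reserved_data_space()
 * if the caller ends up not writing the data:
 *
 *	ret = btrfs_check_data_free_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_data_work(inode, num_bytes);
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, num_bytes);
 */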
3693 * Called if we need to clear a data reservation for this inode.
3695 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3697 struct btrfs_root *root = BTRFS_I(inode)->root;
3698 struct btrfs_space_info *data_sinfo;
3700 /* make sure bytes are sectorsize aligned */
3701 bytes = ALIGN(bytes, root->sectorsize);
3703 data_sinfo = root->fs_info->data_sinfo;
3704 spin_lock(&data_sinfo->lock);
3705 WARN_ON(data_sinfo->bytes_may_use < bytes);
3706 data_sinfo->bytes_may_use -= bytes;
3707 trace_btrfs_space_reservation(root->fs_info, "space_info",
3708 data_sinfo->flags, bytes, 0);
3709 spin_unlock(&data_sinfo->lock);
3712 static void force_metadata_allocation(struct btrfs_fs_info *info)
3714 struct list_head *head = &info->space_info;
3715 struct btrfs_space_info *found;
3718 list_for_each_entry_rcu(found, head, list) {
3719 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3720 found->force_alloc = CHUNK_ALLOC_FORCE;
3725 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3727 return (global->size << 1);
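/*
 * Example (illustrative numbers): with a 512MB global reserve this helper
 * asks callers to treat 1GB as effectively used, which is the headroom
 * should_alloc_chunk() and can_overcommit() below build in for the global
 * reserve.
 */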
3730 static int should_alloc_chunk(struct btrfs_root *root,
3731 struct btrfs_space_info *sinfo, int force)
3733 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3734 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3735 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3738 if (force == CHUNK_ALLOC_FORCE)
3742 * We need to take into account the global rsv because for all intents
3743 * and purposes it's used space. Don't worry about locking the
3744 * global_rsv, it doesn't change except when the transaction commits.
3746 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3747 num_allocated += calc_global_rsv_need_space(global_rsv);
3750 * in limited mode, we want to have some free space up to
3751 * about 1% of the FS size.
3753 if (force == CHUNK_ALLOC_LIMITED) {
3754 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3755 thresh = max_t(u64, 64 * 1024 * 1024,
3756 div_factor_fine(thresh, 1));
3758 if (num_bytes - num_allocated < thresh)
3762 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3767 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3771 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3772 BTRFS_BLOCK_GROUP_RAID0 |
3773 BTRFS_BLOCK_GROUP_RAID5 |
3774 BTRFS_BLOCK_GROUP_RAID6))
3775 num_dev = root->fs_info->fs_devices->rw_devices;
3776 else if (type & BTRFS_BLOCK_GROUP_RAID1)
3779 num_dev = 1; /* DUP or single */
3781 /* metadata for updating devices and the chunk tree */
3782 return btrfs_calc_trans_metadata_size(root, num_dev + 1);
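/*
 * Worked example (a sketch): for a striped profile such as RAID0/10/5/6 on a
 * filesystem with 4 writable devices, num_dev is 4 and the threshold is the
 * metadata cost of 5 items, roughly one device item per device plus the
 * chunk item; for DUP or single it is the cost of 2 items.
 */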
3785 static void check_system_chunk(struct btrfs_trans_handle *trans,
3786 struct btrfs_root *root, u64 type)
3788 struct btrfs_space_info *info;
3792 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3793 spin_lock(&info->lock);
3794 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3795 info->bytes_reserved - info->bytes_readonly;
3796 spin_unlock(&info->lock);
3798 thresh = get_system_chunk_thresh(root, type);
3799 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3800 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3801 left, thresh, type);
3802 dump_space_info(info, 0, 0);
3805 if (left < thresh) {
3808 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3809 btrfs_alloc_chunk(trans, root, flags);
3813 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3814 struct btrfs_root *extent_root, u64 flags, int force)
3816 struct btrfs_space_info *space_info;
3817 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3818 int wait_for_alloc = 0;
3821 /* Don't re-enter if we're already allocating a chunk */
3822 if (trans->allocating_chunk)
3825 space_info = __find_space_info(extent_root->fs_info, flags);
3827 ret = update_space_info(extent_root->fs_info, flags,
3829 BUG_ON(ret); /* -ENOMEM */
3831 BUG_ON(!space_info); /* Logic error */
3834 spin_lock(&space_info->lock);
3835 if (force < space_info->force_alloc)
3836 force = space_info->force_alloc;
3837 if (space_info->full) {
3838 if (should_alloc_chunk(extent_root, space_info, force))
3842 spin_unlock(&space_info->lock);
3846 if (!should_alloc_chunk(extent_root, space_info, force)) {
3847 spin_unlock(&space_info->lock);
3849 } else if (space_info->chunk_alloc) {
3852 space_info->chunk_alloc = 1;
3855 spin_unlock(&space_info->lock);
3857 mutex_lock(&fs_info->chunk_mutex);
3860 * The chunk_mutex is held throughout the entirety of a chunk
3861 * allocation, so once we've acquired the chunk_mutex we know that the
3862 * other guy is done and we need to recheck and see if we should
3865 if (wait_for_alloc) {
3866 mutex_unlock(&fs_info->chunk_mutex);
3871 trans->allocating_chunk = true;
3874 * If we have mixed data/metadata chunks we want to make sure we keep
3875 * allocating mixed chunks instead of individual chunks.
3877 if (btrfs_mixed_space_info(space_info))
3878 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3881 * if we're doing a data chunk, go ahead and make sure that
3882 * we keep a reasonable number of metadata chunks allocated in the
3885 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3886 fs_info->data_chunk_allocations++;
3887 if (!(fs_info->data_chunk_allocations %
3888 fs_info->metadata_ratio))
3889 force_metadata_allocation(fs_info);
3893 * Check if we have enough space in SYSTEM chunk because we may need
3894 * to update devices.
3896 check_system_chunk(trans, extent_root, flags);
3898 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3899 trans->allocating_chunk = false;
3901 spin_lock(&space_info->lock);
3902 if (ret < 0 && ret != -ENOSPC)
3905 space_info->full = 1;
3909 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3911 space_info->chunk_alloc = 0;
3912 spin_unlock(&space_info->lock);
3913 mutex_unlock(&fs_info->chunk_mutex);
3917 static int can_overcommit(struct btrfs_root *root,
3918 struct btrfs_space_info *space_info, u64 bytes,
3919 enum btrfs_reserve_flush_enum flush)
3921 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3922 u64 profile = btrfs_get_alloc_profile(root, 0);
3927 used = space_info->bytes_used + space_info->bytes_reserved +
3928 space_info->bytes_pinned + space_info->bytes_readonly;
3931 * We only want to allow over committing if we have lots of actual space
3932 * free, but if we don't have enough space to handle the global reserve
3933 * space then we could end up having a real enospc problem when trying
3934 * to allocate a chunk or some other such important allocation.
3936 spin_lock(&global_rsv->lock);
3937 space_size = calc_global_rsv_need_space(global_rsv);
3938 spin_unlock(&global_rsv->lock);
3939 if (used + space_size >= space_info->total_bytes)
3942 used += space_info->bytes_may_use;
3944 spin_lock(&root->fs_info->free_chunk_lock);
3945 avail = root->fs_info->free_chunk_space;
3946 spin_unlock(&root->fs_info->free_chunk_lock);
3949 * If we have dup, raid1 or raid10 then only half of the free
3950 * space is actually useable. For raid56, the space info used
3951 * doesn't include the parity drive, so we don't have to
3954 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3955 BTRFS_BLOCK_GROUP_RAID1 |
3956 BTRFS_BLOCK_GROUP_RAID10))
3960 * If we aren't flushing all things, let us overcommit up to
3961 * half of the space. If we can flush, don't let us overcommit
3962 * too much, let it overcommit up to 1/8 of the space.
3964 if (flush == BTRFS_RESERVE_FLUSH_ALL)
3969 if (used + bytes < space_info->total_bytes + avail)
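/*
 * Worked example (illustrative numbers only): with 8GB of free chunk space
 * on a RAID1 metadata profile, avail is halved to 4GB, then cut to 1/8
 * (512MB) for BTRFS_RESERVE_FLUSH_ALL or to 1/2 (2GB) for the other flush
 * modes, per the comment above, before being compared against the space
 * already accounted for in the space_info.
 */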
3974 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3975 unsigned long nr_pages)
3977 struct super_block *sb = root->fs_info->sb;
3979 if (down_read_trylock(&sb->s_umount)) {
3980 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3981 up_read(&sb->s_umount);
3984 * We needn't worry about the filesystem going from r/w to r/o even though
3985 * we don't acquire the ->s_umount mutex, because the filesystem
3986 * should guarantee the delalloc inode list is empty after
3987 * the filesystem is readonly (all dirty pages are written to
3990 btrfs_start_all_delalloc_inodes(root->fs_info, 0);
3991 if (!current->journal_info)
3992 btrfs_wait_all_ordered_extents(root->fs_info);
3997 * shrink metadata reservation for delalloc
3999 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4002 struct btrfs_block_rsv *block_rsv;
4003 struct btrfs_space_info *space_info;
4004 struct btrfs_trans_handle *trans;
4008 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
4010 enum btrfs_reserve_flush_enum flush;
4012 trans = (struct btrfs_trans_handle *)current->journal_info;
4013 block_rsv = &root->fs_info->delalloc_block_rsv;
4014 space_info = block_rsv->space_info;
4017 delalloc_bytes = percpu_counter_sum_positive(
4018 &root->fs_info->delalloc_bytes);
4019 if (delalloc_bytes == 0) {
4022 btrfs_wait_all_ordered_extents(root->fs_info);
4026 while (delalloc_bytes && loops < 3) {
4027 max_reclaim = min(delalloc_bytes, to_reclaim);
4028 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4029 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4031 * We need to wait for the async pages to actually start before
4034 wait_event(root->fs_info->async_submit_wait,
4035 !atomic_read(&root->fs_info->async_delalloc_pages));
4038 flush = BTRFS_RESERVE_FLUSH_ALL;
4040 flush = BTRFS_RESERVE_NO_FLUSH;
4041 spin_lock(&space_info->lock);
4042 if (can_overcommit(root, space_info, orig, flush)) {
4043 spin_unlock(&space_info->lock);
4046 spin_unlock(&space_info->lock);
4049 if (wait_ordered && !trans) {
4050 btrfs_wait_all_ordered_extents(root->fs_info);
4052 time_left = schedule_timeout_killable(1);
4057 delalloc_bytes = percpu_counter_sum_positive(
4058 &root->fs_info->delalloc_bytes);
4063 * may_commit_transaction - possibly commit the transaction if it's ok to
4064 * @root - the root we're allocating for
4065 * @bytes - the number of bytes we want to reserve
4066 * @force - force the commit
4068 * This will check to make sure that committing the transaction will actually
4069 * get us somewhere and then commit the transaction if it does. Otherwise it
4070 * will return -ENOSPC.
4072 static int may_commit_transaction(struct btrfs_root *root,
4073 struct btrfs_space_info *space_info,
4074 u64 bytes, int force)
4076 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4077 struct btrfs_trans_handle *trans;
4079 trans = (struct btrfs_trans_handle *)current->journal_info;
4086 /* See if there is enough pinned space to make this reservation */
4087 spin_lock(&space_info->lock);
4088 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4090 spin_unlock(&space_info->lock);
4093 spin_unlock(&space_info->lock);
4096 * See if there is some space in the delayed insertion reservation for
4099 if (space_info != delayed_rsv->space_info)
4102 spin_lock(&space_info->lock);
4103 spin_lock(&delayed_rsv->lock);
4104 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4105 bytes - delayed_rsv->size) >= 0) {
4106 spin_unlock(&delayed_rsv->lock);
4107 spin_unlock(&space_info->lock);
4110 spin_unlock(&delayed_rsv->lock);
4111 spin_unlock(&space_info->lock);
4114 trans = btrfs_join_transaction(root);
4118 return btrfs_commit_transaction(trans, root);
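/*
 * Summary note (added for clarity): the commit above is only attempted when
 * the pinned byte counter, possibly topped up by what the delayed insertion
 * reservation would give back, suggests a commit could actually satisfy
 * @bytes; otherwise we bail out with -ENOSPC instead of paying for a commit.
 */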
4122 FLUSH_DELAYED_ITEMS_NR = 1,
4123 FLUSH_DELAYED_ITEMS = 2,
4125 FLUSH_DELALLOC_WAIT = 4,
4130 static int flush_space(struct btrfs_root *root,
4131 struct btrfs_space_info *space_info, u64 num_bytes,
4132 u64 orig_bytes, int state)
4134 struct btrfs_trans_handle *trans;
4139 case FLUSH_DELAYED_ITEMS_NR:
4140 case FLUSH_DELAYED_ITEMS:
4141 if (state == FLUSH_DELAYED_ITEMS_NR) {
4142 u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4144 nr = (int)div64_u64(num_bytes, bytes);
4151 trans = btrfs_join_transaction(root);
4152 if (IS_ERR(trans)) {
4153 ret = PTR_ERR(trans);
4156 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4157 btrfs_end_transaction(trans, root);
4159 case FLUSH_DELALLOC:
4160 case FLUSH_DELALLOC_WAIT:
4161 shrink_delalloc(root, num_bytes, orig_bytes,
4162 state == FLUSH_DELALLOC_WAIT);
4165 trans = btrfs_join_transaction(root);
4166 if (IS_ERR(trans)) {
4167 ret = PTR_ERR(trans);
4170 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4171 btrfs_get_alloc_profile(root, 0),
4172 CHUNK_ALLOC_NO_FORCE);
4173 btrfs_end_transaction(trans, root);
4178 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
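/*
 * Note (added for clarity): reserve_metadata_bytes() below escalates through
 * these states in order, from the cheap FLUSH_DELAYED_ITEMS_NR step up to
 * COMMIT_TRANS, retrying the reservation between steps.
 * BTRFS_RESERVE_FLUSH_LIMIT callers skip the delalloc states and stop before
 * COMMIT_TRANS, as the flush_state handling in that function shows.
 */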
4188 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4189 * @root - the root we're allocating for
4190 * @block_rsv - the block_rsv we're allocating for
4191 * @orig_bytes - the number of bytes we want
4192 * @flush - whether or not we can flush to make our reservation
4194 * This will reserve orig_bytes number of bytes from the space info associated
4195 * with the block_rsv. If there is not enough space it will make an attempt to
4196 * flush out space to make room. It will do this by flushing delalloc if
4197 * possible or committing the transaction. If flush is 0 then no attempts to
4198 * regain reservations will be made and this will fail if there is not enough
4201 static int reserve_metadata_bytes(struct btrfs_root *root,
4202 struct btrfs_block_rsv *block_rsv,
4204 enum btrfs_reserve_flush_enum flush)
4206 struct btrfs_space_info *space_info = block_rsv->space_info;
4208 u64 num_bytes = orig_bytes;
4209 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4211 bool flushing = false;
4215 spin_lock(&space_info->lock);
4217 * We only want to wait if somebody other than us is flushing and we
4218 * are actually allowed to flush all things.
4220 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4221 space_info->flush) {
4222 spin_unlock(&space_info->lock);
4224 * If we have a trans handle we can't wait because the flusher
4225 * may have to commit the transaction, which would mean we would
4226 * deadlock since we are waiting for the flusher to finish, but
4227 * hold the current transaction open.
4229 if (current->journal_info)
4231 ret = wait_event_killable(space_info->wait, !space_info->flush);
4232 /* Must have been killed, return */
4236 spin_lock(&space_info->lock);
4240 used = space_info->bytes_used + space_info->bytes_reserved +
4241 space_info->bytes_pinned + space_info->bytes_readonly +
4242 space_info->bytes_may_use;
4245 * The idea here is that if we've not already over-reserved the block group
4246 * then we can go ahead and save our reservation first and then start
4247 * flushing if we need to. Otherwise if we've already overcommitted
4248 * let's start flushing stuff first and then come back and try to make
4251 if (used <= space_info->total_bytes) {
4252 if (used + orig_bytes <= space_info->total_bytes) {
4253 space_info->bytes_may_use += orig_bytes;
4254 trace_btrfs_space_reservation(root->fs_info,
4255 "space_info", space_info->flags, orig_bytes, 1);
4259 * Ok set num_bytes to orig_bytes since we aren't
4260 * overcommitted, this way we only try and reclaim what
4263 num_bytes = orig_bytes;
4267 * Ok we're over committed, set num_bytes to the overcommitted
4268 * amount plus the amount of bytes that we need for this
4271 num_bytes = used - space_info->total_bytes +
4275 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4276 space_info->bytes_may_use += orig_bytes;
4277 trace_btrfs_space_reservation(root->fs_info, "space_info",
4278 space_info->flags, orig_bytes,
4284 * Couldn't make our reservation, save our place so while we're trying
4285 * to reclaim space we can actually use it instead of somebody else
4286 * stealing it from us.
4288 * We make the other tasks wait for the flush only when we can flush
4291 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4293 space_info->flush = 1;
4296 spin_unlock(&space_info->lock);
4298 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4301 ret = flush_space(root, space_info, num_bytes, orig_bytes,
4306 * If we are FLUSH_LIMIT, we cannot flush delalloc or a deadlock
4307 * would happen. So skip the delalloc flush.
4309 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4310 (flush_state == FLUSH_DELALLOC ||
4311 flush_state == FLUSH_DELALLOC_WAIT))
4312 flush_state = ALLOC_CHUNK;
4316 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4317 flush_state < COMMIT_TRANS)
4319 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4320 flush_state <= COMMIT_TRANS)
4324 if (ret == -ENOSPC &&
4325 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4326 struct btrfs_block_rsv *global_rsv =
4327 &root->fs_info->global_block_rsv;
4329 if (block_rsv != global_rsv &&
4330 !block_rsv_use_bytes(global_rsv, orig_bytes))
4334 spin_lock(&space_info->lock);
4335 space_info->flush = 0;
4336 wake_up_all(&space_info->wait);
4337 spin_unlock(&space_info->lock);
4342 static struct btrfs_block_rsv *get_block_rsv(
4343 const struct btrfs_trans_handle *trans,
4344 const struct btrfs_root *root)
4346 struct btrfs_block_rsv *block_rsv = NULL;
4349 block_rsv = trans->block_rsv;
4351 if (root == root->fs_info->csum_root && trans->adding_csums)
4352 block_rsv = trans->block_rsv;
4354 if (root == root->fs_info->uuid_root)
4355 block_rsv = trans->block_rsv;
4358 block_rsv = root->block_rsv;
4361 block_rsv = &root->fs_info->empty_block_rsv;
4366 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4370 spin_lock(&block_rsv->lock);
4371 if (block_rsv->reserved >= num_bytes) {
4372 block_rsv->reserved -= num_bytes;
4373 if (block_rsv->reserved < block_rsv->size)
4374 block_rsv->full = 0;
4377 spin_unlock(&block_rsv->lock);
4381 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4382 u64 num_bytes, int update_size)
4384 spin_lock(&block_rsv->lock);
4385 block_rsv->reserved += num_bytes;
4387 block_rsv->size += num_bytes;
4388 else if (block_rsv->reserved >= block_rsv->size)
4389 block_rsv->full = 1;
4390 spin_unlock(&block_rsv->lock);
4393 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4394 struct btrfs_block_rsv *dest, u64 num_bytes,
4397 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4400 if (global_rsv->space_info != dest->space_info)
4403 spin_lock(&global_rsv->lock);
4404 min_bytes = div_factor(global_rsv->size, min_factor);
4405 if (global_rsv->reserved < min_bytes + num_bytes) {
4406 spin_unlock(&global_rsv->lock);
4409 global_rsv->reserved -= num_bytes;
4410 if (global_rsv->reserved < global_rsv->size)
4411 global_rsv->full = 0;
4412 spin_unlock(&global_rsv->lock);
4414 block_rsv_add_bytes(dest, num_bytes, 1);
4418 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4419 struct btrfs_block_rsv *block_rsv,
4420 struct btrfs_block_rsv *dest, u64 num_bytes)
4422 struct btrfs_space_info *space_info = block_rsv->space_info;
4424 spin_lock(&block_rsv->lock);
4425 if (num_bytes == (u64)-1)
4426 num_bytes = block_rsv->size;
4427 block_rsv->size -= num_bytes;
4428 if (block_rsv->reserved >= block_rsv->size) {
4429 num_bytes = block_rsv->reserved - block_rsv->size;
4430 block_rsv->reserved = block_rsv->size;
4431 block_rsv->full = 1;
4435 spin_unlock(&block_rsv->lock);
4437 if (num_bytes > 0) {
4439 spin_lock(&dest->lock);
4443 bytes_to_add = dest->size - dest->reserved;
4444 bytes_to_add = min(num_bytes, bytes_to_add);
4445 dest->reserved += bytes_to_add;
4446 if (dest->reserved >= dest->size)
4448 num_bytes -= bytes_to_add;
4450 spin_unlock(&dest->lock);
4453 spin_lock(&space_info->lock);
4454 space_info->bytes_may_use -= num_bytes;
4455 trace_btrfs_space_reservation(fs_info, "space_info",
4456 space_info->flags, num_bytes, 0);
4457 spin_unlock(&space_info->lock);
4462 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4463 struct btrfs_block_rsv *dst, u64 num_bytes)
4467 ret = block_rsv_use_bytes(src, num_bytes);
4471 block_rsv_add_bytes(dst, num_bytes, 1);
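/*
 * Descriptive note (added for clarity): migrating is just
 * block_rsv_use_bytes() on @src followed by block_rsv_add_bytes() on @dst,
 * so no new space is reserved here; if @src does not hold enough, the
 * migration fails without touching @dst (callers such as
 * btrfs_orphan_reserve_metadata() below rely on getting only 0 or -ENOSPC).
 */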
4475 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4477 memset(rsv, 0, sizeof(*rsv));
4478 spin_lock_init(&rsv->lock);
4482 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4483 unsigned short type)
4485 struct btrfs_block_rsv *block_rsv;
4486 struct btrfs_fs_info *fs_info = root->fs_info;
4488 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4492 btrfs_init_block_rsv(block_rsv, type);
4493 block_rsv->space_info = __find_space_info(fs_info,
4494 BTRFS_BLOCK_GROUP_METADATA);
4498 void btrfs_free_block_rsv(struct btrfs_root *root,
4499 struct btrfs_block_rsv *rsv)
4503 btrfs_block_rsv_release(root, rsv, (u64)-1);
4507 int btrfs_block_rsv_add(struct btrfs_root *root,
4508 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4509 enum btrfs_reserve_flush_enum flush)
4516 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4518 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4525 int btrfs_block_rsv_check(struct btrfs_root *root,
4526 struct btrfs_block_rsv *block_rsv, int min_factor)
4534 spin_lock(&block_rsv->lock);
4535 num_bytes = div_factor(block_rsv->size, min_factor);
4536 if (block_rsv->reserved >= num_bytes)
4538 spin_unlock(&block_rsv->lock);
4543 int btrfs_block_rsv_refill(struct btrfs_root *root,
4544 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4545 enum btrfs_reserve_flush_enum flush)
4553 spin_lock(&block_rsv->lock);
4554 num_bytes = min_reserved;
4555 if (block_rsv->reserved >= num_bytes)
4558 num_bytes -= block_rsv->reserved;
4559 spin_unlock(&block_rsv->lock);
4564 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4566 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4573 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4574 struct btrfs_block_rsv *dst_rsv,
4577 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4580 void btrfs_block_rsv_release(struct btrfs_root *root,
4581 struct btrfs_block_rsv *block_rsv,
4584 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4585 if (global_rsv->full || global_rsv == block_rsv ||
4586 block_rsv->space_info != global_rsv->space_info)
4588 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4593 * helper to calculate the size of the global block reservation.
4594 * the desired value is the sum of the space used by the extent tree,
4595 * checksum tree and root tree
4597 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4599 struct btrfs_space_info *sinfo;
4603 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4605 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4606 spin_lock(&sinfo->lock);
4607 data_used = sinfo->bytes_used;
4608 spin_unlock(&sinfo->lock);
4610 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4611 spin_lock(&sinfo->lock);
4612 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4614 meta_used = sinfo->bytes_used;
4615 spin_unlock(&sinfo->lock);
4617 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4619 num_bytes += div64_u64(data_used + meta_used, 50);
4621 if (num_bytes * 3 > meta_used)
4622 num_bytes = div64_u64(meta_used, 3);
4624 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
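/*
 * Worked example (purely illustrative numbers): with 100GB of data in use,
 * 4KB blocks and a 4-byte csum, the checksum estimate is roughly 100MB;
 * adding ~2% of data plus metadata usage and clamping to at most a third of
 * the metadata usage gives the value that is then aligned up to a
 * 1024-leaf multiple.
 */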
4627 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4629 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4630 struct btrfs_space_info *sinfo = block_rsv->space_info;
4633 num_bytes = calc_global_metadata_size(fs_info);
4635 spin_lock(&sinfo->lock);
4636 spin_lock(&block_rsv->lock);
4638 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4640 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4641 sinfo->bytes_reserved + sinfo->bytes_readonly +
4642 sinfo->bytes_may_use;
4644 if (sinfo->total_bytes > num_bytes) {
4645 num_bytes = sinfo->total_bytes - num_bytes;
4646 block_rsv->reserved += num_bytes;
4647 sinfo->bytes_may_use += num_bytes;
4648 trace_btrfs_space_reservation(fs_info, "space_info",
4649 sinfo->flags, num_bytes, 1);
4652 if (block_rsv->reserved >= block_rsv->size) {
4653 num_bytes = block_rsv->reserved - block_rsv->size;
4654 sinfo->bytes_may_use -= num_bytes;
4655 trace_btrfs_space_reservation(fs_info, "space_info",
4656 sinfo->flags, num_bytes, 0);
4657 block_rsv->reserved = block_rsv->size;
4658 block_rsv->full = 1;
4661 spin_unlock(&block_rsv->lock);
4662 spin_unlock(&sinfo->lock);
4665 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4667 struct btrfs_space_info *space_info;
4669 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4670 fs_info->chunk_block_rsv.space_info = space_info;
4672 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4673 fs_info->global_block_rsv.space_info = space_info;
4674 fs_info->delalloc_block_rsv.space_info = space_info;
4675 fs_info->trans_block_rsv.space_info = space_info;
4676 fs_info->empty_block_rsv.space_info = space_info;
4677 fs_info->delayed_block_rsv.space_info = space_info;
4679 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4680 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4681 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4682 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4683 if (fs_info->quota_root)
4684 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4685 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4687 update_global_block_rsv(fs_info);
4690 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4692 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4694 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4695 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4696 WARN_ON(fs_info->trans_block_rsv.size > 0);
4697 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4698 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4699 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4700 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4701 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4704 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4705 struct btrfs_root *root)
4707 if (!trans->block_rsv)
4710 if (!trans->bytes_reserved)
4713 trace_btrfs_space_reservation(root->fs_info, "transaction",
4714 trans->transid, trans->bytes_reserved, 0);
4715 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4716 trans->bytes_reserved = 0;
4719 /* Can only return 0 or -ENOSPC */
4720 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4721 struct inode *inode)
4723 struct btrfs_root *root = BTRFS_I(inode)->root;
4724 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4725 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4728 * We need to hold space in order to delete our orphan item once we've
4729 * added it, so this takes the reservation so we can release it later
4730 * when we are truly done with the orphan item.
4732 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4733 trace_btrfs_space_reservation(root->fs_info, "orphan",
4734 btrfs_ino(inode), num_bytes, 1);
4735 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4738 void btrfs_orphan_release_metadata(struct inode *inode)
4740 struct btrfs_root *root = BTRFS_I(inode)->root;
4741 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4742 trace_btrfs_space_reservation(root->fs_info, "orphan",
4743 btrfs_ino(inode), num_bytes, 0);
4744 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4748 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4749 * root: the root of the parent directory
4750 * rsv: block reservation
4751 * items: the number of items that we need to reserve
4752 * qgroup_reserved: used to return the reserved size in qgroup
4754 * This function is used to reserve the space for snapshot/subvolume
4755 * creation and deletion. Those operations are different from the
4756 * common file/directory operations: they change two fs/file trees
4757 * and the root tree, and the number of items that the qgroup reserves
4758 * is different from the free space reservation. So we cannot use
4759 * the space reservation mechanism in start_transaction().
4761 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4762 struct btrfs_block_rsv *rsv,
4764 u64 *qgroup_reserved,
4765 bool use_global_rsv)
4769 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4771 if (root->fs_info->quota_enabled) {
4772 /* One for parent inode, two for dir entries */
4773 num_bytes = 3 * root->leafsize;
4774 ret = btrfs_qgroup_reserve(root, num_bytes);
4781 *qgroup_reserved = num_bytes;
4783 num_bytes = btrfs_calc_trans_metadata_size(root, items);
4784 rsv->space_info = __find_space_info(root->fs_info,
4785 BTRFS_BLOCK_GROUP_METADATA);
4786 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4787 BTRFS_RESERVE_FLUSH_ALL);
4789 if (ret == -ENOSPC && use_global_rsv)
4790 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4793 if (*qgroup_reserved)
4794 btrfs_qgroup_free(root, *qgroup_reserved);
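/*
 * Worked example (illustrative, not from the original source): with quotas
 * enabled the helper above charges 3 * leafsize against the qgroup before the
 * normal metadata reservation is attempted -- one leaf for the parent inode
 * and two for the dir entries, as noted in the comment above.  Assuming a
 * hypothetical 16KB leafsize, that is a 48KB qgroup reservation, handed back
 * with btrfs_qgroup_free() if the block reservation later fails.
 */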
4800 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4801 struct btrfs_block_rsv *rsv,
4802 u64 qgroup_reserved)
4804 btrfs_block_rsv_release(root, rsv, (u64)-1);
4805 if (qgroup_reserved)
4806 btrfs_qgroup_free(root, qgroup_reserved);
4810 * drop_outstanding_extent - drop an outstanding extent
4811 * @inode: the inode we're dropping the extent for
4813 * This is called when we are freeing up an outstanding extent, either
4814 * after an error or after an extent is written. This will return the number of
4815 * reserved extents that need to be freed. This must be called with
4816 * BTRFS_I(inode)->lock held.
4818 static unsigned drop_outstanding_extent(struct inode *inode)
4820 unsigned drop_inode_space = 0;
4821 unsigned dropped_extents = 0;
4823 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4824 BTRFS_I(inode)->outstanding_extents--;
4826 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4827 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4828 &BTRFS_I(inode)->runtime_flags))
4829 drop_inode_space = 1;
4832 * If we have the same number of outstanding extents as we have
4833 * reserved, or more, then we need to leave the reserved extents count alone.
4835 if (BTRFS_I(inode)->outstanding_extents >=
4836 BTRFS_I(inode)->reserved_extents)
4837 return drop_inode_space;
4839 dropped_extents = BTRFS_I(inode)->reserved_extents -
4840 BTRFS_I(inode)->outstanding_extents;
4841 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4842 return dropped_extents + drop_inode_space;
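/*
 * Worked example (illustrative, not from the original source): suppose an
 * inode enters the helper above with outstanding_extents == 3 and
 * reserved_extents == 4.  After the decrement, outstanding_extents is 2,
 * which is below reserved_extents, so dropped_extents == 4 - 2 == 2 and
 * reserved_extents drops to 2; the caller gets 2 reservations back (3 if the
 * delalloc inode-update reservation bit was cleared as well).
 */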
4846 * calc_csum_metadata_size - return the amount of metadata space that must be
4847 * reserved/free'd for the given bytes.
4848 * @inode: the inode we're manipulating
4849 * @num_bytes: the number of bytes in question
4850 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4852 * This adjusts the number of csum_bytes in the inode and then returns the
4853 * correct amount of metadata that must either be reserved or freed. We
4854 * calculate how many checksums we can fit into one leaf and then divide the
4855 * number of bytes that will need to be checksummed by this value to figure out
4856 * how many checksums will be required. If we are adding bytes then the number
4857 * may go up and we will return the number of additional bytes that must be
4858 * reserved. If it is going down we will return the number of bytes that must be freed.
4861 * This must be called with BTRFS_I(inode)->lock held.
4863 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4866 struct btrfs_root *root = BTRFS_I(inode)->root;
4868 int num_csums_per_leaf;
4872 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4873 BTRFS_I(inode)->csum_bytes == 0)
4876 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4878 BTRFS_I(inode)->csum_bytes += num_bytes;
4880 BTRFS_I(inode)->csum_bytes -= num_bytes;
4881 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4882 num_csums_per_leaf = (int)div64_u64(csum_size,
4883 sizeof(struct btrfs_csum_item) +
4884 sizeof(struct btrfs_disk_key));
4885 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4886 num_csums = num_csums + num_csums_per_leaf - 1;
4887 num_csums = num_csums / num_csums_per_leaf;
4889 old_csums = old_csums + num_csums_per_leaf - 1;
4890 old_csums = old_csums / num_csums_per_leaf;
4892 /* No change, no need to reserve more */
4893 if (old_csums == num_csums)
4897 return btrfs_calc_trans_metadata_size(root,
4898 num_csums - old_csums);
4900 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
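/*
 * Illustrative sketch (not part of the original file): the round-up
 * arithmetic above converts a csum_bytes count into "how many csum leaves do
 * we need", one checksum per sector.  A standalone model of that calculation,
 * with the sector size and csums-per-leaf passed in as hypothetical
 * parameters:
 */
#if 0	/* example only, never compiled */
static u64 example_csum_leaves(u64 csum_bytes, u32 sectorsize,
			       u32 csums_per_leaf)
{
	u64 num_csums = csum_bytes / sectorsize;	/* one csum per sector */

	/* round up to whole leaves, mirroring the helper above */
	return (num_csums + csums_per_leaf - 1) / csums_per_leaf;
}
#endif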
4903 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4905 struct btrfs_root *root = BTRFS_I(inode)->root;
4906 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4909 unsigned nr_extents = 0;
4910 int extra_reserve = 0;
4911 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4913 bool delalloc_lock = true;
4917 /* If we are a free space inode we need to not flush since we will be in
4918 * the middle of a transaction commit. We also don't need the delalloc
4919 * mutex since we won't race with anybody. We need this mostly to make
4920 * lockdep shut its filthy mouth.
4922 if (btrfs_is_free_space_inode(inode)) {
4923 flush = BTRFS_RESERVE_NO_FLUSH;
4924 delalloc_lock = false;
4927 if (flush != BTRFS_RESERVE_NO_FLUSH &&
4928 btrfs_transaction_in_commit(root->fs_info))
4929 schedule_timeout(1);
4932 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4934 num_bytes = ALIGN(num_bytes, root->sectorsize);
4936 spin_lock(&BTRFS_I(inode)->lock);
4937 BTRFS_I(inode)->outstanding_extents++;
4939 if (BTRFS_I(inode)->outstanding_extents >
4940 BTRFS_I(inode)->reserved_extents)
4941 nr_extents = BTRFS_I(inode)->outstanding_extents -
4942 BTRFS_I(inode)->reserved_extents;
4945 * Add an item to reserve for updating the inode when we complete the extent for this range.
4948 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4949 &BTRFS_I(inode)->runtime_flags)) {
4954 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4955 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4956 csum_bytes = BTRFS_I(inode)->csum_bytes;
4957 spin_unlock(&BTRFS_I(inode)->lock);
4959 if (root->fs_info->quota_enabled) {
4960 ret = btrfs_qgroup_reserve(root, num_bytes +
4961 nr_extents * root->leafsize);
4966 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4967 if (unlikely(ret)) {
4968 if (root->fs_info->quota_enabled)
4969 btrfs_qgroup_free(root, num_bytes +
4970 nr_extents * root->leafsize);
4974 spin_lock(&BTRFS_I(inode)->lock);
4975 if (extra_reserve) {
4976 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4977 &BTRFS_I(inode)->runtime_flags);
4980 BTRFS_I(inode)->reserved_extents += nr_extents;
4981 spin_unlock(&BTRFS_I(inode)->lock);
4984 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4987 trace_btrfs_space_reservation(root->fs_info,"delalloc",
4988 btrfs_ino(inode), to_reserve, 1);
4989 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4994 spin_lock(&BTRFS_I(inode)->lock);
4995 dropped = drop_outstanding_extent(inode);
4997 * If the inode's csum_bytes is the same as the original
4998 * csum_bytes then we know we haven't raced with any free()ers
4999 * so we can just reduce our inode's csum bytes and carry on.
5001 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5002 calc_csum_metadata_size(inode, num_bytes, 0);
5004 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5008 * This is tricky, but first we need to figure out how much we
5009 * free'd from any free-ers that occurred during this
5010 * reservation, so we reset ->csum_bytes to the csum_bytes
5011 * before we dropped our lock, and then call the free for the
5012 * number of bytes that were freed while we were trying our reservation.
5015 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5016 BTRFS_I(inode)->csum_bytes = csum_bytes;
5017 to_free = calc_csum_metadata_size(inode, bytes, 0);
5021 * Now we need to see how much we would have freed had we not
5022 * been making this reservation and our ->csum_bytes were not
5023 * artificially inflated.
5025 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5026 bytes = csum_bytes - orig_csum_bytes;
5027 bytes = calc_csum_metadata_size(inode, bytes, 0);
5030 * Now reset ->csum_bytes to what it should be. If bytes is
5031 * more than to_free then we would have free'd more space had we
5032 * not had an artificially high ->csum_bytes, so we need to free
5033 * the remainder. If bytes is the same or less then we don't
5034 * need to do anything, the other free-ers did the correct thing.
5037 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5038 if (bytes > to_free)
5039 to_free = bytes - to_free;
5043 spin_unlock(&BTRFS_I(inode)->lock);
5045 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5048 btrfs_block_rsv_release(root, block_rsv, to_free);
5049 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5050 btrfs_ino(inode), to_free, 0);
5053 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5058 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5059 * @inode: the inode to release the reservation for
5060 * @num_bytes: the number of bytes we're releasing
5062 * This will release the metadata reservation for an inode. This can be called
5063 * once we complete IO for a given set of bytes to release their metadata reservations.
5066 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5068 struct btrfs_root *root = BTRFS_I(inode)->root;
5072 num_bytes = ALIGN(num_bytes, root->sectorsize);
5073 spin_lock(&BTRFS_I(inode)->lock);
5074 dropped = drop_outstanding_extent(inode);
5077 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5078 spin_unlock(&BTRFS_I(inode)->lock);
5080 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5082 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5083 btrfs_ino(inode), to_free, 0);
5084 if (root->fs_info->quota_enabled) {
5085 btrfs_qgroup_free(root, num_bytes +
5086 dropped * root->leafsize);
5089 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5094 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5095 * @inode: inode we're writing to
5096 * @num_bytes: the number of bytes we want to allocate
5098 * This will do the following things
5100 * o reserve space in the data space info for num_bytes
5101 * o reserve space in the metadata space info based on number of outstanding
5102 * extents and how much csums will be needed
5103 * o add to the inodes ->delalloc_bytes
5104 * o add it to the fs_info's delalloc inodes list.
5106 * This will return 0 for success and -ENOSPC if there is no space left.
5108 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5112 ret = btrfs_check_data_free_space(inode, num_bytes);
5116 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5118 btrfs_free_reserved_data_space(inode, num_bytes);
5126 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5127 * @inode: inode we're releasing space for
5128 * @num_bytes: the number of bytes we want to free up
5130 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5131 * called in the case that we don't need the metadata AND data reservations
5132 * anymore, for example if there is an error or we insert an inline extent.
5134 * This function will release the metadata space that was not used and will
5135 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5136 * list if there are no delalloc bytes left.
5138 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5140 btrfs_delalloc_release_metadata(inode, num_bytes);
5141 btrfs_free_reserved_data_space(inode, num_bytes);
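/*
 * Illustrative usage sketch (hypothetical caller, not from this file): the
 * two helpers above are meant to be used as a pair around a buffered write.
 * example_dirty_pages() below is an assumed placeholder for the code that
 * actually creates the delalloc range.
 */
#if 0	/* example only, never compiled */
static int example_prepare_write(struct inode *inode, u64 len)
{
	int ret;

	/* reserve data space and the matching metadata up front */
	ret = btrfs_delalloc_reserve_space(inode, len);
	if (ret)
		return ret;		/* typically -ENOSPC, nothing is held */

	ret = example_dirty_pages(inode, len);
	if (ret) {
		/* nothing was delalloc'ed, so give back both halves */
		btrfs_delalloc_release_space(inode, len);
		return ret;
	}
	return 0;
}
#endif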
5144 static int update_block_group(struct btrfs_root *root,
5145 u64 bytenr, u64 num_bytes, int alloc)
5147 struct btrfs_block_group_cache *cache = NULL;
5148 struct btrfs_fs_info *info = root->fs_info;
5149 u64 total = num_bytes;
5154 /* block accounting for super block */
5155 spin_lock(&info->delalloc_root_lock);
5156 old_val = btrfs_super_bytes_used(info->super_copy);
5158 old_val += num_bytes;
5160 old_val -= num_bytes;
5161 btrfs_set_super_bytes_used(info->super_copy, old_val);
5162 spin_unlock(&info->delalloc_root_lock);
5165 cache = btrfs_lookup_block_group(info, bytenr);
5168 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5169 BTRFS_BLOCK_GROUP_RAID1 |
5170 BTRFS_BLOCK_GROUP_RAID10))
5175 * If this block group has free space cache written out, we
5176 * need to make sure to load it if we are removing space. This
5177 * is because we need the unpinning stage to actually add the
5178 * space back to the block group, otherwise we will leak space.
5180 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5181 cache_block_group(cache, 1);
5183 byte_in_group = bytenr - cache->key.objectid;
5184 WARN_ON(byte_in_group > cache->key.offset);
5186 spin_lock(&cache->space_info->lock);
5187 spin_lock(&cache->lock);
5189 if (btrfs_test_opt(root, SPACE_CACHE) &&
5190 cache->disk_cache_state < BTRFS_DC_CLEAR)
5191 cache->disk_cache_state = BTRFS_DC_CLEAR;
5194 old_val = btrfs_block_group_used(&cache->item);
5195 num_bytes = min(total, cache->key.offset - byte_in_group);
5197 old_val += num_bytes;
5198 btrfs_set_block_group_used(&cache->item, old_val);
5199 cache->reserved -= num_bytes;
5200 cache->space_info->bytes_reserved -= num_bytes;
5201 cache->space_info->bytes_used += num_bytes;
5202 cache->space_info->disk_used += num_bytes * factor;
5203 spin_unlock(&cache->lock);
5204 spin_unlock(&cache->space_info->lock);
5206 old_val -= num_bytes;
5207 btrfs_set_block_group_used(&cache->item, old_val);
5208 cache->pinned += num_bytes;
5209 cache->space_info->bytes_pinned += num_bytes;
5210 cache->space_info->bytes_used -= num_bytes;
5211 cache->space_info->disk_used -= num_bytes * factor;
5212 spin_unlock(&cache->lock);
5213 spin_unlock(&cache->space_info->lock);
5215 set_extent_dirty(info->pinned_extents,
5216 bytenr, bytenr + num_bytes - 1,
5217 GFP_NOFS | __GFP_NOFAIL);
5219 btrfs_put_block_group(cache);
5221 bytenr += num_bytes;
5226 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5228 struct btrfs_block_group_cache *cache;
5231 spin_lock(&root->fs_info->block_group_cache_lock);
5232 bytenr = root->fs_info->first_logical_byte;
5233 spin_unlock(&root->fs_info->block_group_cache_lock);
5235 if (bytenr < (u64)-1)
5238 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5242 bytenr = cache->key.objectid;
5243 btrfs_put_block_group(cache);
5248 static int pin_down_extent(struct btrfs_root *root,
5249 struct btrfs_block_group_cache *cache,
5250 u64 bytenr, u64 num_bytes, int reserved)
5252 spin_lock(&cache->space_info->lock);
5253 spin_lock(&cache->lock);
5254 cache->pinned += num_bytes;
5255 cache->space_info->bytes_pinned += num_bytes;
5257 cache->reserved -= num_bytes;
5258 cache->space_info->bytes_reserved -= num_bytes;
5260 spin_unlock(&cache->lock);
5261 spin_unlock(&cache->space_info->lock);
5263 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5264 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5269 * this function must be called within transaction
5271 int btrfs_pin_extent(struct btrfs_root *root,
5272 u64 bytenr, u64 num_bytes, int reserved)
5274 struct btrfs_block_group_cache *cache;
5276 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5277 BUG_ON(!cache); /* Logic error */
5279 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5281 btrfs_put_block_group(cache);
5286 * this function must be called within transaction
5288 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5289 u64 bytenr, u64 num_bytes)
5291 struct btrfs_block_group_cache *cache;
5294 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5299 * pull in the free space cache (if any) so that our pin
5300 * removes the free space from the cache. We have load_only set
5301 * to one because the slow code to read in the free extents does check
5302 * the pinned extents.
5304 cache_block_group(cache, 1);
5306 pin_down_extent(root, cache, bytenr, num_bytes, 0);
5308 /* remove us from the free space cache (if we're there at all) */
5309 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5310 btrfs_put_block_group(cache);
5314 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5317 struct btrfs_block_group_cache *block_group;
5318 struct btrfs_caching_control *caching_ctl;
5320 block_group = btrfs_lookup_block_group(root->fs_info, start);
5324 cache_block_group(block_group, 0);
5325 caching_ctl = get_caching_control(block_group);
5329 BUG_ON(!block_group_cache_done(block_group));
5330 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5332 mutex_lock(&caching_ctl->mutex);
5334 if (start >= caching_ctl->progress) {
5335 ret = add_excluded_extent(root, start, num_bytes);
5336 } else if (start + num_bytes <= caching_ctl->progress) {
5337 ret = btrfs_remove_free_space(block_group,
5340 num_bytes = caching_ctl->progress - start;
5341 ret = btrfs_remove_free_space(block_group,
5346 num_bytes = (start + num_bytes) -
5347 caching_ctl->progress;
5348 start = caching_ctl->progress;
5349 ret = add_excluded_extent(root, start, num_bytes);
5352 mutex_unlock(&caching_ctl->mutex);
5353 put_caching_control(caching_ctl);
5355 btrfs_put_block_group(block_group);
5359 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5360 struct extent_buffer *eb)
5362 struct btrfs_file_extent_item *item;
5363 struct btrfs_key key;
5367 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5370 for (i = 0; i < btrfs_header_nritems(eb); i++) {
5371 btrfs_item_key_to_cpu(eb, &key, i);
5372 if (key.type != BTRFS_EXTENT_DATA_KEY)
5374 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5375 found_type = btrfs_file_extent_type(eb, item);
5376 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5378 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5380 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5381 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5382 __exclude_logged_extent(log, key.objectid, key.offset);
5389 * btrfs_update_reserved_bytes - update the block_group and space info counters
5390 * @cache: The cache we are manipulating
5391 * @num_bytes: The number of bytes in question
5392 * @reserve: One of the reservation enums
5394 * This is called by the allocator when it reserves space, or by somebody who is
5395 * freeing space that was never actually used on disk. For example if you
5396 * reserve some space for a new leaf in transaction A and before transaction A
5397 * commits you free that leaf, you call this with reserve set to 0 in order to
5398 * clear the reservation.
5400 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5401 * ENOSPC accounting. For data we handle the reservation through clearing the
5402 * delalloc bits in the io_tree. We have to do this since we could end up
5403 * allocating less disk space for the amount of data we have reserved in the
5404 * case of compression.
5406 * If this is a reservation and the block group has become read only we cannot
5407 * make the reservation and return -EAGAIN, otherwise this function always succeeds.
5410 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5411 u64 num_bytes, int reserve)
5413 struct btrfs_space_info *space_info = cache->space_info;
5416 spin_lock(&space_info->lock);
5417 spin_lock(&cache->lock);
5418 if (reserve != RESERVE_FREE) {
5422 cache->reserved += num_bytes;
5423 space_info->bytes_reserved += num_bytes;
5424 if (reserve == RESERVE_ALLOC) {
5425 trace_btrfs_space_reservation(cache->fs_info,
5426 "space_info", space_info->flags,
5428 space_info->bytes_may_use -= num_bytes;
5433 space_info->bytes_readonly += num_bytes;
5434 cache->reserved -= num_bytes;
5435 space_info->bytes_reserved -= num_bytes;
5437 spin_unlock(&cache->lock);
5438 spin_unlock(&space_info->lock);
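/*
 * Worked example (illustrative, not from the original source): reserving 16KB
 * with RESERVE_ALLOC moves 16KB out of space_info->bytes_may_use and adds it
 * to both cache->reserved and space_info->bytes_reserved; the later matching
 * RESERVE_FREE call takes the same 16KB back out of the reserved counters.
 * If the block group went read-only in between, a new reservation is refused
 * with -EAGAIN and no counters move.
 */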
5442 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5443 struct btrfs_root *root)
5445 struct btrfs_fs_info *fs_info = root->fs_info;
5446 struct btrfs_caching_control *next;
5447 struct btrfs_caching_control *caching_ctl;
5448 struct btrfs_block_group_cache *cache;
5449 struct btrfs_space_info *space_info;
5451 down_write(&fs_info->extent_commit_sem);
5453 list_for_each_entry_safe(caching_ctl, next,
5454 &fs_info->caching_block_groups, list) {
5455 cache = caching_ctl->block_group;
5456 if (block_group_cache_done(cache)) {
5457 cache->last_byte_to_unpin = (u64)-1;
5458 list_del_init(&caching_ctl->list);
5459 put_caching_control(caching_ctl);
5461 cache->last_byte_to_unpin = caching_ctl->progress;
5465 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5466 fs_info->pinned_extents = &fs_info->freed_extents[1];
5468 fs_info->pinned_extents = &fs_info->freed_extents[0];
5470 up_write(&fs_info->extent_commit_sem);
5472 list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5473 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5475 update_global_block_rsv(fs_info);
5478 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5480 struct btrfs_fs_info *fs_info = root->fs_info;
5481 struct btrfs_block_group_cache *cache = NULL;
5482 struct btrfs_space_info *space_info;
5483 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5487 while (start <= end) {
5490 start >= cache->key.objectid + cache->key.offset) {
5492 btrfs_put_block_group(cache);
5493 cache = btrfs_lookup_block_group(fs_info, start);
5494 BUG_ON(!cache); /* Logic error */
5497 len = cache->key.objectid + cache->key.offset - start;
5498 len = min(len, end + 1 - start);
5500 if (start < cache->last_byte_to_unpin) {
5501 len = min(len, cache->last_byte_to_unpin - start);
5502 btrfs_add_free_space(cache, start, len);
5506 space_info = cache->space_info;
5508 spin_lock(&space_info->lock);
5509 spin_lock(&cache->lock);
5510 cache->pinned -= len;
5511 space_info->bytes_pinned -= len;
5513 space_info->bytes_readonly += len;
5516 spin_unlock(&cache->lock);
5517 if (!readonly && global_rsv->space_info == space_info) {
5518 spin_lock(&global_rsv->lock);
5519 if (!global_rsv->full) {
5520 len = min(len, global_rsv->size -
5521 global_rsv->reserved);
5522 global_rsv->reserved += len;
5523 space_info->bytes_may_use += len;
5524 if (global_rsv->reserved >= global_rsv->size)
5525 global_rsv->full = 1;
5527 spin_unlock(&global_rsv->lock);
5529 spin_unlock(&space_info->lock);
5533 btrfs_put_block_group(cache);
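/*
 * Worked example (illustrative, not from the original source): if 1MB is
 * unpinned in a metadata group and the global reserve is 256KB short of its
 * target size, the loop above moves 256KB of the newly unpinned space into
 * the global reserve's accounting (reserved and bytes_may_use both grow by
 * 256KB) while the full 1MB still returns to the block group's free space.
 */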
5537 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5538 struct btrfs_root *root)
5540 struct btrfs_fs_info *fs_info = root->fs_info;
5541 struct extent_io_tree *unpin;
5549 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5550 unpin = &fs_info->freed_extents[1];
5552 unpin = &fs_info->freed_extents[0];
5555 ret = find_first_extent_bit(unpin, 0, &start, &end,
5556 EXTENT_DIRTY, NULL);
5560 if (btrfs_test_opt(root, DISCARD))
5561 ret = btrfs_discard_extent(root, start,
5562 end + 1 - start, NULL);
5564 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5565 unpin_extent_range(root, start, end);
5572 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5573 u64 owner, u64 root_objectid)
5575 struct btrfs_space_info *space_info;
5578 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5579 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5580 flags = BTRFS_BLOCK_GROUP_SYSTEM;
5582 flags = BTRFS_BLOCK_GROUP_METADATA;
5584 flags = BTRFS_BLOCK_GROUP_DATA;
5587 space_info = __find_space_info(fs_info, flags);
5588 BUG_ON(!space_info); /* Logic bug */
5589 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5593 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5594 struct btrfs_root *root,
5595 u64 bytenr, u64 num_bytes, u64 parent,
5596 u64 root_objectid, u64 owner_objectid,
5597 u64 owner_offset, int refs_to_drop,
5598 struct btrfs_delayed_extent_op *extent_op)
5600 struct btrfs_key key;
5601 struct btrfs_path *path;
5602 struct btrfs_fs_info *info = root->fs_info;
5603 struct btrfs_root *extent_root = info->extent_root;
5604 struct extent_buffer *leaf;
5605 struct btrfs_extent_item *ei;
5606 struct btrfs_extent_inline_ref *iref;
5609 int extent_slot = 0;
5610 int found_extent = 0;
5614 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5617 path = btrfs_alloc_path();
5622 path->leave_spinning = 1;
5624 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5625 BUG_ON(!is_data && refs_to_drop != 1);
5628 skinny_metadata = 0;
5630 ret = lookup_extent_backref(trans, extent_root, path, &iref,
5631 bytenr, num_bytes, parent,
5632 root_objectid, owner_objectid,
5635 extent_slot = path->slots[0];
5636 while (extent_slot >= 0) {
5637 btrfs_item_key_to_cpu(path->nodes[0], &key,
5639 if (key.objectid != bytenr)
5641 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5642 key.offset == num_bytes) {
5646 if (key.type == BTRFS_METADATA_ITEM_KEY &&
5647 key.offset == owner_objectid) {
5651 if (path->slots[0] - extent_slot > 5)
5655 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5656 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5657 if (found_extent && item_size < sizeof(*ei))
5660 if (!found_extent) {
5662 ret = remove_extent_backref(trans, extent_root, path,
5666 btrfs_abort_transaction(trans, extent_root, ret);
5669 btrfs_release_path(path);
5670 path->leave_spinning = 1;
5672 key.objectid = bytenr;
5673 key.type = BTRFS_EXTENT_ITEM_KEY;
5674 key.offset = num_bytes;
5676 if (!is_data && skinny_metadata) {
5677 key.type = BTRFS_METADATA_ITEM_KEY;
5678 key.offset = owner_objectid;
5681 ret = btrfs_search_slot(trans, extent_root,
5683 if (ret > 0 && skinny_metadata && path->slots[0]) {
5685 * Couldn't find our skinny metadata item,
5686 * see if we have ye olde extent item.
5689 btrfs_item_key_to_cpu(path->nodes[0], &key,
5691 if (key.objectid == bytenr &&
5692 key.type == BTRFS_EXTENT_ITEM_KEY &&
5693 key.offset == num_bytes)
5697 if (ret > 0 && skinny_metadata) {
5698 skinny_metadata = false;
5699 key.type = BTRFS_EXTENT_ITEM_KEY;
5700 key.offset = num_bytes;
5701 btrfs_release_path(path);
5702 ret = btrfs_search_slot(trans, extent_root,
5707 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5710 btrfs_print_leaf(extent_root,
5714 btrfs_abort_transaction(trans, extent_root, ret);
5717 extent_slot = path->slots[0];
5719 } else if (ret == -ENOENT) {
5720 btrfs_print_leaf(extent_root, path->nodes[0]);
5723 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
5724 bytenr, parent, root_objectid, owner_objectid,
5727 btrfs_abort_transaction(trans, extent_root, ret);
5731 leaf = path->nodes[0];
5732 item_size = btrfs_item_size_nr(leaf, extent_slot);
5733 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5734 if (item_size < sizeof(*ei)) {
5735 BUG_ON(found_extent || extent_slot != path->slots[0]);
5736 ret = convert_extent_item_v0(trans, extent_root, path,
5739 btrfs_abort_transaction(trans, extent_root, ret);
5743 btrfs_release_path(path);
5744 path->leave_spinning = 1;
5746 key.objectid = bytenr;
5747 key.type = BTRFS_EXTENT_ITEM_KEY;
5748 key.offset = num_bytes;
5750 ret = btrfs_search_slot(trans, extent_root, &key, path,
5753 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5755 btrfs_print_leaf(extent_root, path->nodes[0]);
5758 btrfs_abort_transaction(trans, extent_root, ret);
5762 extent_slot = path->slots[0];
5763 leaf = path->nodes[0];
5764 item_size = btrfs_item_size_nr(leaf, extent_slot);
5767 BUG_ON(item_size < sizeof(*ei));
5768 ei = btrfs_item_ptr(leaf, extent_slot,
5769 struct btrfs_extent_item);
5770 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5771 key.type == BTRFS_EXTENT_ITEM_KEY) {
5772 struct btrfs_tree_block_info *bi;
5773 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5774 bi = (struct btrfs_tree_block_info *)(ei + 1);
5775 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5778 refs = btrfs_extent_refs(leaf, ei);
5779 if (refs < refs_to_drop) {
5780 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
5781 "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
5783 btrfs_abort_transaction(trans, extent_root, ret);
5786 refs -= refs_to_drop;
5790 __run_delayed_extent_op(extent_op, leaf, ei);
5792 * In the case of inline back ref, reference count will
5793 * be updated by remove_extent_backref
5796 BUG_ON(!found_extent);
5798 btrfs_set_extent_refs(leaf, ei, refs);
5799 btrfs_mark_buffer_dirty(leaf);
5802 ret = remove_extent_backref(trans, extent_root, path,
5806 btrfs_abort_transaction(trans, extent_root, ret);
5810 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5814 BUG_ON(is_data && refs_to_drop !=
5815 extent_data_ref_count(root, path, iref));
5817 BUG_ON(path->slots[0] != extent_slot);
5819 BUG_ON(path->slots[0] != extent_slot + 1);
5820 path->slots[0] = extent_slot;
5825 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5828 btrfs_abort_transaction(trans, extent_root, ret);
5831 btrfs_release_path(path);
5834 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5836 btrfs_abort_transaction(trans, extent_root, ret);
5841 ret = update_block_group(root, bytenr, num_bytes, 0);
5843 btrfs_abort_transaction(trans, extent_root, ret);
5848 btrfs_free_path(path);
5853 * when we free a block, it is possible (and likely) that we free the last
5854 * delayed ref for that extent as well. This searches the delayed ref tree for
5855 * a given extent, and if there are no other delayed refs to be processed, it
5856 * removes it from the tree.
5858 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5859 struct btrfs_root *root, u64 bytenr)
5861 struct btrfs_delayed_ref_head *head;
5862 struct btrfs_delayed_ref_root *delayed_refs;
5863 struct btrfs_delayed_ref_node *ref;
5864 struct rb_node *node;
5867 delayed_refs = &trans->transaction->delayed_refs;
5868 spin_lock(&delayed_refs->lock);
5869 head = btrfs_find_delayed_ref_head(trans, bytenr);
5873 node = rb_prev(&head->node.rb_node);
5877 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5879 /* there are still entries for this ref, we can't drop it */
5880 if (ref->bytenr == bytenr)
5883 if (head->extent_op) {
5884 if (!head->must_insert_reserved)
5886 btrfs_free_delayed_extent_op(head->extent_op);
5887 head->extent_op = NULL;
5891 * waiting for the lock here would deadlock. If someone else has it
5892 * locked they are already in the process of dropping it anyway
5894 if (!mutex_trylock(&head->mutex))
5898 * at this point we have a head with no other entries. Go
5899 * ahead and process it.
5901 head->node.in_tree = 0;
5902 rb_erase(&head->node.rb_node, &delayed_refs->root);
5904 delayed_refs->num_entries--;
5907 * we don't take a ref on the node because we're removing it from the
5908 * tree, so we just steal the ref the tree was holding.
5910 delayed_refs->num_heads--;
5911 if (list_empty(&head->cluster))
5912 delayed_refs->num_heads_ready--;
5914 list_del_init(&head->cluster);
5915 spin_unlock(&delayed_refs->lock);
5917 BUG_ON(head->extent_op);
5918 if (head->must_insert_reserved)
5921 mutex_unlock(&head->mutex);
5922 btrfs_put_delayed_ref(&head->node);
5925 spin_unlock(&delayed_refs->lock);
5929 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5930 struct btrfs_root *root,
5931 struct extent_buffer *buf,
5932 u64 parent, int last_ref)
5934 struct btrfs_block_group_cache *cache = NULL;
5938 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5939 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5940 buf->start, buf->len,
5941 parent, root->root_key.objectid,
5942 btrfs_header_level(buf),
5943 BTRFS_DROP_DELAYED_REF, NULL, 0);
5944 BUG_ON(ret); /* -ENOMEM */
5950 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5952 if (btrfs_header_generation(buf) == trans->transid) {
5953 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5954 ret = check_ref_cleanup(trans, root, buf->start);
5959 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5960 pin_down_extent(root, cache, buf->start, buf->len, 1);
5964 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5966 btrfs_add_free_space(cache, buf->start, buf->len);
5967 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5972 add_pinned_bytes(root->fs_info, buf->len,
5973 btrfs_header_level(buf),
5974 root->root_key.objectid);
5977 * Deleting the buffer, clear the corrupt flag since it doesn't matter anymore.
5980 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5981 btrfs_put_block_group(cache);
5984 /* Can return -ENOMEM */
5985 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5986 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5987 u64 owner, u64 offset, int for_cow)
5990 struct btrfs_fs_info *fs_info = root->fs_info;
5992 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
5995 * tree log blocks never actually go into the extent allocation
5996 * tree, just update pinning info and exit early.
5998 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5999 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6000 /* unlocks the pinned mutex */
6001 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6003 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6004 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6006 parent, root_objectid, (int)owner,
6007 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6009 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6011 parent, root_objectid, owner,
6012 offset, BTRFS_DROP_DELAYED_REF,
6018 static u64 stripe_align(struct btrfs_root *root,
6019 struct btrfs_block_group_cache *cache,
6020 u64 val, u64 num_bytes)
6022 u64 ret = ALIGN(val, root->stripesize);
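/*
 * Worked example (illustrative, not from the original source): ALIGN() rounds
 * up to the next stripesize boundary, so with a hypothetical 64KB stripesize
 * a candidate offset of 100KB is bumped to 128KB before it is handed back to
 * the allocator.
 */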
6027 * when we wait for progress in the block group caching, it's because
6028 * our allocation attempt failed at least once. So, we must sleep
6029 * and let some progress happen before we try again.
6031 * This function will sleep at least once waiting for new free space to
6032 * show up, and then it will check the block group free space numbers
6033 * for our min num_bytes. Another option is to have it go ahead
6034 * and look in the rbtree for a free extent of a given size, but this is a pretty big job.
6037 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6038 * any of the information in this block group.
6040 static noinline void
6041 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6044 struct btrfs_caching_control *caching_ctl;
6046 caching_ctl = get_caching_control(cache);
6050 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6051 (cache->free_space_ctl->free_space >= num_bytes));
6053 put_caching_control(caching_ctl);
6057 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6059 struct btrfs_caching_control *caching_ctl;
6062 caching_ctl = get_caching_control(cache);
6064 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6066 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6067 if (cache->cached == BTRFS_CACHE_ERROR)
6069 put_caching_control(caching_ctl);
6073 int __get_raid_index(u64 flags)
6075 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6076 return BTRFS_RAID_RAID10;
6077 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6078 return BTRFS_RAID_RAID1;
6079 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6080 return BTRFS_RAID_DUP;
6081 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6082 return BTRFS_RAID_RAID0;
6083 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6084 return BTRFS_RAID_RAID5;
6085 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6086 return BTRFS_RAID_RAID6;
6088 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6091 static int get_block_group_index(struct btrfs_block_group_cache *cache)
6093 return __get_raid_index(cache->flags);
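/*
 * Illustrative example (not from the original source): a block group whose
 * flags contain BTRFS_BLOCK_GROUP_RAID1 maps to index BTRFS_RAID_RAID1, while
 * a group with none of the RAID/DUP bits set falls through to
 * BTRFS_RAID_SINGLE; find_free_extent() below uses that index to pick the
 * per-RAID-type list inside the space_info.
 */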
6096 enum btrfs_loop_type {
6097 LOOP_CACHING_NOWAIT = 0,
6098 LOOP_CACHING_WAIT = 1,
6099 LOOP_ALLOC_CHUNK = 2,
6100 LOOP_NO_EMPTY_SIZE = 3,
6104 * walks the btree of allocated extents and finds a hole of a given size.
6105 * The key ins is changed to record the hole:
6106 * ins->objectid == start position
6107 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6108 * ins->offset == the size of the hole.
6109 * Any available blocks before search_start are skipped.
6111 * If there is no suitable free space, we will record the max size of
6112 * the free space extent currently.
6114 static noinline int find_free_extent(struct btrfs_root *orig_root,
6115 u64 num_bytes, u64 empty_size,
6116 u64 hint_byte, struct btrfs_key *ins,
6120 struct btrfs_root *root = orig_root->fs_info->extent_root;
6121 struct btrfs_free_cluster *last_ptr = NULL;
6122 struct btrfs_block_group_cache *block_group = NULL;
6123 struct btrfs_block_group_cache *used_block_group;
6124 u64 search_start = 0;
6125 u64 max_extent_size = 0;
6126 int empty_cluster = 2 * 1024 * 1024;
6127 struct btrfs_space_info *space_info;
6129 int index = __get_raid_index(flags);
6130 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6131 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6132 bool found_uncached_bg = false;
6133 bool failed_cluster_refill = false;
6134 bool failed_alloc = false;
6135 bool use_cluster = true;
6136 bool have_caching_bg = false;
6138 WARN_ON(num_bytes < root->sectorsize);
6139 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6143 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6145 space_info = __find_space_info(root->fs_info, flags);
6147 btrfs_err(root->fs_info, "No space info for %llu", flags);
6152 * If the space info is for both data and metadata it means we have a
6153 * small filesystem and we can't use the clustering stuff.
6155 if (btrfs_mixed_space_info(space_info))
6156 use_cluster = false;
6158 if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6159 last_ptr = &root->fs_info->meta_alloc_cluster;
6160 if (!btrfs_test_opt(root, SSD))
6161 empty_cluster = 64 * 1024;
6164 if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6165 btrfs_test_opt(root, SSD)) {
6166 last_ptr = &root->fs_info->data_alloc_cluster;
6170 spin_lock(&last_ptr->lock);
6171 if (last_ptr->block_group)
6172 hint_byte = last_ptr->window_start;
6173 spin_unlock(&last_ptr->lock);
6176 search_start = max(search_start, first_logical_byte(root, 0));
6177 search_start = max(search_start, hint_byte);
6182 if (search_start == hint_byte) {
6183 block_group = btrfs_lookup_block_group(root->fs_info,
6185 used_block_group = block_group;
6187 * we don't want to use the block group if it doesn't match our
6188 * allocation bits, or if it's not cached.
6190 * However if we are re-searching with an ideal block group
6191 * picked out then we don't care that the block group is cached.
6193 if (block_group && block_group_bits(block_group, flags) &&
6194 block_group->cached != BTRFS_CACHE_NO) {
6195 down_read(&space_info->groups_sem);
6196 if (list_empty(&block_group->list) ||
6199 * someone is removing this block group,
6200 * we can't jump into the have_block_group
6201 * target because our list pointers are not valid.
6204 btrfs_put_block_group(block_group);
6205 up_read(&space_info->groups_sem);
6207 index = get_block_group_index(block_group);
6208 goto have_block_group;
6210 } else if (block_group) {
6211 btrfs_put_block_group(block_group);
6215 have_caching_bg = false;
6216 down_read(&space_info->groups_sem);
6217 list_for_each_entry(block_group, &space_info->block_groups[index],
6222 used_block_group = block_group;
6223 btrfs_get_block_group(block_group);
6224 search_start = block_group->key.objectid;
6227 * this can happen if we end up cycling through all the
6228 * raid types, but we want to make sure we only allocate
6229 * for the proper type.
6231 if (!block_group_bits(block_group, flags)) {
6232 u64 extra = BTRFS_BLOCK_GROUP_DUP |
6233 BTRFS_BLOCK_GROUP_RAID1 |
6234 BTRFS_BLOCK_GROUP_RAID5 |
6235 BTRFS_BLOCK_GROUP_RAID6 |
6236 BTRFS_BLOCK_GROUP_RAID10;
6239 * if they asked for extra copies and this block group
6240 * doesn't provide them, bail. This does allow us to
6241 * fill raid0 from raid1.
6243 if ((flags & extra) && !(block_group->flags & extra))
6248 cached = block_group_cache_done(block_group);
6249 if (unlikely(!cached)) {
6250 found_uncached_bg = true;
6251 ret = cache_block_group(block_group, 0);
6256 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6258 if (unlikely(block_group->ro))
6262 * Ok we want to try and use the cluster allocator, so let's look there.
6266 unsigned long aligned_cluster;
6268 * the refill lock keeps out other
6269 * people trying to start a new cluster
6271 spin_lock(&last_ptr->refill_lock);
6272 used_block_group = last_ptr->block_group;
6273 if (used_block_group != block_group &&
6274 (!used_block_group ||
6275 used_block_group->ro ||
6276 !block_group_bits(used_block_group, flags))) {
6277 used_block_group = block_group;
6278 goto refill_cluster;
6281 if (used_block_group != block_group)
6282 btrfs_get_block_group(used_block_group);
6284 offset = btrfs_alloc_from_cluster(used_block_group,
6287 used_block_group->key.objectid,
6290 /* we have a block, we're done */
6291 spin_unlock(&last_ptr->refill_lock);
6292 trace_btrfs_reserve_extent_cluster(root,
6293 block_group, search_start, num_bytes);
6297 WARN_ON(last_ptr->block_group != used_block_group);
6298 if (used_block_group != block_group) {
6299 btrfs_put_block_group(used_block_group);
6300 used_block_group = block_group;
6303 BUG_ON(used_block_group != block_group);
6304 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6305 * set up a new cluster, so let's just skip it
6306 * and let the allocator find whatever block
6307 * it can find. If we reach this point, we
6308 * will have tried the cluster allocator
6309 * plenty of times and not have found
6310 * anything, so we are likely way too
6311 * fragmented for the clustering stuff to find anything.
6314 * However, if the cluster is taken from the
6315 * current block group, release the cluster
6316 * first, so that we stand a better chance of
6317 * succeeding in the unclustered allocation attempt.
6319 if (loop >= LOOP_NO_EMPTY_SIZE &&
6320 last_ptr->block_group != block_group) {
6321 spin_unlock(&last_ptr->refill_lock);
6322 goto unclustered_alloc;
6326 * this cluster didn't work out, free it and start over
6329 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6331 if (loop >= LOOP_NO_EMPTY_SIZE) {
6332 spin_unlock(&last_ptr->refill_lock);
6333 goto unclustered_alloc;
6336 aligned_cluster = max_t(unsigned long,
6337 empty_cluster + empty_size,
6338 block_group->full_stripe_len);
6340 /* allocate a cluster in this block group */
6341 ret = btrfs_find_space_cluster(root, block_group,
6342 last_ptr, search_start,
6347 * now pull our allocation out of this cluster
6350 offset = btrfs_alloc_from_cluster(block_group,
6356 /* we found one, proceed */
6357 spin_unlock(&last_ptr->refill_lock);
6358 trace_btrfs_reserve_extent_cluster(root,
6359 block_group, search_start,
6363 } else if (!cached && loop > LOOP_CACHING_NOWAIT
6364 && !failed_cluster_refill) {
6365 spin_unlock(&last_ptr->refill_lock);
6367 failed_cluster_refill = true;
6368 wait_block_group_cache_progress(block_group,
6369 num_bytes + empty_cluster + empty_size);
6370 goto have_block_group;
6374 * at this point we either didn't find a cluster
6375 * or we weren't able to allocate a block from our
6376 * cluster. Free the cluster we've been trying
6377 * to use, and go to the next block group
6379 btrfs_return_cluster_to_free_space(NULL, last_ptr);
6380 spin_unlock(&last_ptr->refill_lock);
6385 spin_lock(&block_group->free_space_ctl->tree_lock);
6387 block_group->free_space_ctl->free_space <
6388 num_bytes + empty_cluster + empty_size) {
6389 if (block_group->free_space_ctl->free_space >
6392 block_group->free_space_ctl->free_space;
6393 spin_unlock(&block_group->free_space_ctl->tree_lock);
6396 spin_unlock(&block_group->free_space_ctl->tree_lock);
6398 offset = btrfs_find_space_for_alloc(block_group, search_start,
6399 num_bytes, empty_size,
6402 * If we didn't find a chunk, and we haven't failed on this
6403 * block group before, and this block group is in the middle of
6404 * caching and we are ok with waiting, then go ahead and wait
6405 * for progress to be made, and set failed_alloc to true.
6407 * If failed_alloc is true then we've already waited on this
6408 * block group once and should move on to the next block group.
6410 if (!offset && !failed_alloc && !cached &&
6411 loop > LOOP_CACHING_NOWAIT) {
6412 wait_block_group_cache_progress(block_group,
6413 num_bytes + empty_size);
6414 failed_alloc = true;
6415 goto have_block_group;
6416 } else if (!offset) {
6418 have_caching_bg = true;
6422 search_start = stripe_align(root, used_block_group,
6425 /* move on to the next group */
6426 if (search_start + num_bytes >
6427 used_block_group->key.objectid + used_block_group->key.offset) {
6428 btrfs_add_free_space(used_block_group, offset, num_bytes);
6432 if (offset < search_start)
6433 btrfs_add_free_space(used_block_group, offset,
6434 search_start - offset);
6435 BUG_ON(offset > search_start);
6437 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6439 if (ret == -EAGAIN) {
6440 btrfs_add_free_space(used_block_group, offset, num_bytes);
6444 /* we are all good, lets return */
6445 ins->objectid = search_start;
6446 ins->offset = num_bytes;
6448 trace_btrfs_reserve_extent(orig_root, block_group,
6449 search_start, num_bytes);
6450 if (used_block_group != block_group)
6451 btrfs_put_block_group(used_block_group);
6452 btrfs_put_block_group(block_group);
6455 failed_cluster_refill = false;
6456 failed_alloc = false;
6457 BUG_ON(index != get_block_group_index(block_group));
6458 if (used_block_group != block_group)
6459 btrfs_put_block_group(used_block_group);
6460 btrfs_put_block_group(block_group);
6462 up_read(&space_info->groups_sem);
6464 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6467 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6471 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6472 * caching kthreads as we move along
6473 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6474 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6475 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try again
6478 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6481 if (loop == LOOP_ALLOC_CHUNK) {
6482 struct btrfs_trans_handle *trans;
6484 trans = btrfs_join_transaction(root);
6485 if (IS_ERR(trans)) {
6486 ret = PTR_ERR(trans);
6490 ret = do_chunk_alloc(trans, root, flags,
6493 * Do not bail out on ENOSPC since we
6494 * can do more things.
6496 if (ret < 0 && ret != -ENOSPC)
6497 btrfs_abort_transaction(trans,
6501 btrfs_end_transaction(trans, root);
6506 if (loop == LOOP_NO_EMPTY_SIZE) {
6512 } else if (!ins->objectid) {
6514 } else if (ins->objectid) {
6519 ins->offset = max_extent_size;
6523 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6524 int dump_block_groups)
6526 struct btrfs_block_group_cache *cache;
6529 spin_lock(&info->lock);
6530 printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6532 info->total_bytes - info->bytes_used - info->bytes_pinned -
6533 info->bytes_reserved - info->bytes_readonly,
6534 (info->full) ? "" : "not ");
6535 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6536 "reserved=%llu, may_use=%llu, readonly=%llu\n",
6537 info->total_bytes, info->bytes_used, info->bytes_pinned,
6538 info->bytes_reserved, info->bytes_may_use,
6539 info->bytes_readonly);
6540 spin_unlock(&info->lock);
6542 if (!dump_block_groups)
6545 down_read(&info->groups_sem);
6547 list_for_each_entry(cache, &info->block_groups[index], list) {
6548 spin_lock(&cache->lock);
6549 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6550 cache->key.objectid, cache->key.offset,
6551 btrfs_block_group_used(&cache->item), cache->pinned,
6552 cache->reserved, cache->ro ? "[readonly]" : "");
6553 btrfs_dump_free_space(cache, bytes);
6554 spin_unlock(&cache->lock);
6556 if (++index < BTRFS_NR_RAID_TYPES)
6558 up_read(&info->groups_sem);
6561 int btrfs_reserve_extent(struct btrfs_root *root,
6562 u64 num_bytes, u64 min_alloc_size,
6563 u64 empty_size, u64 hint_byte,
6564 struct btrfs_key *ins, int is_data)
6566 bool final_tried = false;
6570 flags = btrfs_get_alloc_profile(root, is_data);
6572 WARN_ON(num_bytes < root->sectorsize);
6573 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6576 if (ret == -ENOSPC) {
6577 if (!final_tried && ins->offset) {
6578 num_bytes = min(num_bytes >> 1, ins->offset);
6579 num_bytes = round_down(num_bytes, root->sectorsize);
6580 num_bytes = max(num_bytes, min_alloc_size);
6581 if (num_bytes == min_alloc_size)
6584 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6585 struct btrfs_space_info *sinfo;
6587 sinfo = __find_space_info(root->fs_info, flags);
6588 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6591 dump_space_info(sinfo, num_bytes, 1);
6595 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
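/*
 * Worked example (illustrative, not from the original source): assume a
 * hypothetical 4KB sectorsize and a min_alloc_size of 64KB.  A 1MB request
 * that fails with -ENOSPC while ins->offset reports 300KB of contiguous free
 * space retries with min(512KB, 300KB) = 300KB (already sector aligned and
 * above the minimum).  If that also fails and only 48KB is reported, the next
 * attempt is clamped up to the 64KB minimum and becomes the final try.
 */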
6600 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6601 u64 start, u64 len, int pin)
6603 struct btrfs_block_group_cache *cache;
6606 cache = btrfs_lookup_block_group(root->fs_info, start);
6608 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6613 if (btrfs_test_opt(root, DISCARD))
6614 ret = btrfs_discard_extent(root, start, len, NULL);
6617 pin_down_extent(root, cache, start, len, 1);
6619 btrfs_add_free_space(cache, start, len);
6620 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6622 btrfs_put_block_group(cache);
6624 trace_btrfs_reserved_extent_free(root, start, len);
6629 int btrfs_free_reserved_extent(struct btrfs_root *root,
6632 return __btrfs_free_reserved_extent(root, start, len, 0);
6635 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6638 return __btrfs_free_reserved_extent(root, start, len, 1);
6641 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6642 struct btrfs_root *root,
6643 u64 parent, u64 root_objectid,
6644 u64 flags, u64 owner, u64 offset,
6645 struct btrfs_key *ins, int ref_mod)
6648 struct btrfs_fs_info *fs_info = root->fs_info;
6649 struct btrfs_extent_item *extent_item;
6650 struct btrfs_extent_inline_ref *iref;
6651 struct btrfs_path *path;
6652 struct extent_buffer *leaf;
6657 type = BTRFS_SHARED_DATA_REF_KEY;
6659 type = BTRFS_EXTENT_DATA_REF_KEY;
6661 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6663 path = btrfs_alloc_path();
6667 path->leave_spinning = 1;
6668 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6671 btrfs_free_path(path);
6675 leaf = path->nodes[0];
6676 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6677 struct btrfs_extent_item);
6678 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6679 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6680 btrfs_set_extent_flags(leaf, extent_item,
6681 flags | BTRFS_EXTENT_FLAG_DATA);
6683 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6684 btrfs_set_extent_inline_ref_type(leaf, iref, type);
6686 struct btrfs_shared_data_ref *ref;
6687 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6688 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6689 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6691 struct btrfs_extent_data_ref *ref;
6692 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6693 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6694 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6695 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6696 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6699 btrfs_mark_buffer_dirty(path->nodes[0]);
6700 btrfs_free_path(path);
6702 ret = update_block_group(root, ins->objectid, ins->offset, 1);
6703 if (ret) { /* -ENOENT, logic error */
6704 btrfs_err(fs_info, "update block group failed for %llu %llu",
6705 ins->objectid, ins->offset);
6711 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6712 struct btrfs_root *root,
6713 u64 parent, u64 root_objectid,
6714 u64 flags, struct btrfs_disk_key *key,
6715 int level, struct btrfs_key *ins)
6718 struct btrfs_fs_info *fs_info = root->fs_info;
6719 struct btrfs_extent_item *extent_item;
6720 struct btrfs_tree_block_info *block_info;
6721 struct btrfs_extent_inline_ref *iref;
6722 struct btrfs_path *path;
6723 struct extent_buffer *leaf;
6724 u32 size = sizeof(*extent_item) + sizeof(*iref);
6725 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6728 if (!skinny_metadata)
6729 size += sizeof(*block_info);
6731 path = btrfs_alloc_path();
6735 path->leave_spinning = 1;
6736 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6739 btrfs_free_path(path);
6743 leaf = path->nodes[0];
6744 extent_item = btrfs_item_ptr(leaf, path->slots[0],
6745 struct btrfs_extent_item);
6746 btrfs_set_extent_refs(leaf, extent_item, 1);
6747 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6748 btrfs_set_extent_flags(leaf, extent_item,
6749 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6751 if (skinny_metadata) {
6752 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6754 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6755 btrfs_set_tree_block_key(leaf, block_info, key);
6756 btrfs_set_tree_block_level(leaf, block_info, level);
6757 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6761 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6762 btrfs_set_extent_inline_ref_type(leaf, iref,
6763 BTRFS_SHARED_BLOCK_REF_KEY);
6764 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6766 btrfs_set_extent_inline_ref_type(leaf, iref,
6767 BTRFS_TREE_BLOCK_REF_KEY);
6768 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6771 btrfs_mark_buffer_dirty(leaf);
6772 btrfs_free_path(path);
6774 ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6775 if (ret) { /* -ENOENT, logic error */
6776 btrfs_err(fs_info, "update block group failed for %llu %llu",
6777 ins->objectid, ins->offset);
6783 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6784 struct btrfs_root *root,
6785 u64 root_objectid, u64 owner,
6786 u64 offset, struct btrfs_key *ins)
6790 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6792 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6794 root_objectid, owner, offset,
6795 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6800 * this is used by the tree logging recovery code. It records that
6801 * an extent has been allocated and makes sure to clear the free
6802 * space cache bits as well
6804 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6805 struct btrfs_root *root,
6806 u64 root_objectid, u64 owner, u64 offset,
6807 struct btrfs_key *ins)
6810 struct btrfs_block_group_cache *block_group;
6813 * Mixed block groups will exclude before processing the log so we only
6814 * need to do the exclude dance if this fs isn't mixed.
6816 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6817 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6822 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6826 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6827 RESERVE_ALLOC_NO_ACCOUNT);
6828 BUG_ON(ret); /* logic error */
6829 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6830 0, owner, offset, ins, 1);
6831 btrfs_put_block_group(block_group);
6835 static struct extent_buffer *
6836 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6837 u64 bytenr, u32 blocksize, int level)
6839 struct extent_buffer *buf;
6841 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6843 return ERR_PTR(-ENOMEM);
6844 btrfs_set_header_generation(buf, trans->transid);
6845 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6846 btrfs_tree_lock(buf);
6847 clean_tree_block(trans, root, buf);
6848 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6850 btrfs_set_lock_blocking(buf);
6851 btrfs_set_buffer_uptodate(buf);
6853 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6855 * we allow two log transactions at a time, use different
6856 EXTENT bit to differentiate dirty pages.
6858 if (root->log_transid % 2 == 0)
6859 set_extent_dirty(&root->dirty_log_pages, buf->start,
6860 buf->start + buf->len - 1, GFP_NOFS);
6862 set_extent_new(&root->dirty_log_pages, buf->start,
6863 buf->start + buf->len - 1, GFP_NOFS);
6865 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6866 buf->start + buf->len - 1, GFP_NOFS);
6868 trans->blocks_used++;
6869 /* this returns a buffer locked for blocking */
6873 static struct btrfs_block_rsv *
6874 use_block_rsv(struct btrfs_trans_handle *trans,
6875 struct btrfs_root *root, u32 blocksize)
6877 struct btrfs_block_rsv *block_rsv;
6878 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6880 bool global_updated = false;
6882 block_rsv = get_block_rsv(trans, root);
6884 if (unlikely(block_rsv->size == 0))
6887 ret = block_rsv_use_bytes(block_rsv, blocksize);
6891 if (block_rsv->failfast)
6892 return ERR_PTR(ret);
6894 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6895 global_updated = true;
6896 update_global_block_rsv(root->fs_info);
6900 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6901 static DEFINE_RATELIMIT_STATE(_rs,
6902 DEFAULT_RATELIMIT_INTERVAL * 10,
6903 /*DEFAULT_RATELIMIT_BURST*/ 1);
6904 if (__ratelimit(&_rs))
6906 "btrfs: block rsv returned %d\n", ret);
6909 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6910 BTRFS_RESERVE_NO_FLUSH);
6914 * If we couldn't reserve metadata bytes try and use some from
6915 * the global reserve if its space type is the same as the global
6918 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6919 block_rsv->space_info == global_rsv->space_info) {
6920 ret = block_rsv_use_bytes(global_rsv, blocksize);
6924 return ERR_PTR(ret);
6927 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6928 struct btrfs_block_rsv *block_rsv, u32 blocksize)
6930 block_rsv_add_bytes(block_rsv, blocksize, 0);
6931 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
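/*
 * use_block_rsv() and unuse_block_rsv() are meant to be paired: the caller
 * takes blocksize bytes from the chosen reservation (falling back to the
 * global rsv when the reservation types allow it) and gives them back with
 * unuse_block_rsv() if the allocation itself fails, as
 * btrfs_alloc_free_block() does below.
 */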
6935 * finds a free extent and does all the dirty work required for allocation.
6936 * returns the key for the extent through ins, and a tree buffer for
6937 * the first block of the extent through buf.
6939 * returns the tree buffer or an ERR_PTR on failure.
6941 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6942 struct btrfs_root *root, u32 blocksize,
6943 u64 parent, u64 root_objectid,
6944 struct btrfs_disk_key *key, int level,
6945 u64 hint, u64 empty_size)
6947 struct btrfs_key ins;
6948 struct btrfs_block_rsv *block_rsv;
6949 struct extent_buffer *buf;
6952 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6955 block_rsv = use_block_rsv(trans, root, blocksize);
6956 if (IS_ERR(block_rsv))
6957 return ERR_CAST(block_rsv);
6959 ret = btrfs_reserve_extent(root, blocksize, blocksize,
6960 empty_size, hint, &ins, 0);
6962 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6963 return ERR_PTR(ret);
6966 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6968 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6970 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6972 parent = ins.objectid;
6973 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6977 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6978 struct btrfs_delayed_extent_op *extent_op;
6979 extent_op = btrfs_alloc_delayed_extent_op();
6980 BUG_ON(!extent_op); /* -ENOMEM */
6982 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6984 memset(&extent_op->key, 0, sizeof(extent_op->key));
6985 extent_op->flags_to_set = flags;
6986 if (skinny_metadata)
6987 extent_op->update_key = 0;
6989 extent_op->update_key = 1;
6990 extent_op->update_flags = 1;
6991 extent_op->is_data = 0;
6992 extent_op->level = level;
6994 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6996 ins.offset, parent, root_objectid,
6997 level, BTRFS_ADD_DELAYED_EXTENT,
6999 BUG_ON(ret); /* -ENOMEM */
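/*
 * Minimal usage sketch (hypothetical caller and variable names, shown only
 * to illustrate the calling convention; see btrfs_init_new_buffer() above
 * for the locked/dirty state of the returned buffer):
 *
 *	struct btrfs_disk_key disk_key;
 *	struct extent_buffer *cow;
 *
 *	btrfs_node_key(parent_eb, &disk_key, slot);
 *	cow = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
 *				     root->root_key.objectid, &disk_key,
 *				     level, hint_byte, 0);
 *	if (IS_ERR(cow))
 *		return PTR_ERR(cow);
 */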
7004 struct walk_control {
7005 u64 refs[BTRFS_MAX_LEVEL];
7006 u64 flags[BTRFS_MAX_LEVEL];
7007 struct btrfs_key update_progress;
7018 #define DROP_REFERENCE 1
7019 #define UPDATE_BACKREF 2
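/*
 * Rough summary of the two stages: in DROP_REFERENCE the walk drops the
 * reference on every block that is only referenced by the tree being
 * deleted. When it hits a shared block whose backrefs need updating, it
 * switches to UPDATE_BACKREF for that subtree, and walk_up_proc() switches
 * back to DROP_REFERENCE once the shared level has been processed.
 */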
7021 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7022 struct btrfs_root *root,
7023 struct walk_control *wc,
7024 struct btrfs_path *path)
7032 struct btrfs_key key;
7033 struct extent_buffer *eb;
7038 if (path->slots[wc->level] < wc->reada_slot) {
7039 wc->reada_count = wc->reada_count * 2 / 3;
7040 wc->reada_count = max(wc->reada_count, 2);
7042 wc->reada_count = wc->reada_count * 3 / 2;
7043 wc->reada_count = min_t(int, wc->reada_count,
7044 BTRFS_NODEPTRS_PER_BLOCK(root));
7047 eb = path->nodes[wc->level];
7048 nritems = btrfs_header_nritems(eb);
7049 blocksize = btrfs_level_size(root, wc->level - 1);
7051 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7052 if (nread >= wc->reada_count)
7056 bytenr = btrfs_node_blockptr(eb, slot);
7057 generation = btrfs_node_ptr_generation(eb, slot);
7059 if (slot == path->slots[wc->level])
7062 if (wc->stage == UPDATE_BACKREF &&
7063 generation <= root->root_key.offset)
7066 /* We don't lock the tree block, it's OK to be racy here */
7067 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7068 wc->level - 1, 1, &refs,
7070 /* We don't care about errors in readahead. */
7075 if (wc->stage == DROP_REFERENCE) {
7079 if (wc->level == 1 &&
7080 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7082 if (!wc->update_ref ||
7083 generation <= root->root_key.offset)
7085 btrfs_node_key_to_cpu(eb, &key, slot);
7086 ret = btrfs_comp_cpu_keys(&key,
7087 &wc->update_progress);
7091 if (wc->level == 1 &&
7092 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7096 ret = readahead_tree_block(root, bytenr, blocksize,
7102 wc->reada_slot = slot;
7106 * helper to process tree block while walking down the tree.
7108 * when wc->stage == UPDATE_BACKREF, this function updates
7109 * back refs for pointers in the block.
7111 * NOTE: return value 1 means we should stop walking down.
7113 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7114 struct btrfs_root *root,
7115 struct btrfs_path *path,
7116 struct walk_control *wc, int lookup_info)
7118 int level = wc->level;
7119 struct extent_buffer *eb = path->nodes[level];
7120 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7123 if (wc->stage == UPDATE_BACKREF &&
7124 btrfs_header_owner(eb) != root->root_key.objectid)
7128 * when reference count of tree block is 1, it won't increase
7129 * again. once full backref flag is set, we never clear it.
7132 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7133 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7134 BUG_ON(!path->locks[level]);
7135 ret = btrfs_lookup_extent_info(trans, root,
7136 eb->start, level, 1,
7139 BUG_ON(ret == -ENOMEM);
7142 BUG_ON(wc->refs[level] == 0);
7145 if (wc->stage == DROP_REFERENCE) {
7146 if (wc->refs[level] > 1)
7149 if (path->locks[level] && !wc->keep_locks) {
7150 btrfs_tree_unlock_rw(eb, path->locks[level]);
7151 path->locks[level] = 0;
7156 /* wc->stage == UPDATE_BACKREF */
7157 if (!(wc->flags[level] & flag)) {
7158 BUG_ON(!path->locks[level]);
7159 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7160 BUG_ON(ret); /* -ENOMEM */
7161 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7162 BUG_ON(ret); /* -ENOMEM */
7163 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7165 btrfs_header_level(eb), 0);
7166 BUG_ON(ret); /* -ENOMEM */
7167 wc->flags[level] |= flag;
7171 * the block is shared by multiple trees, so it's not good to
7172 * keep the tree lock
7174 if (path->locks[level] && level > 0) {
7175 btrfs_tree_unlock_rw(eb, path->locks[level]);
7176 path->locks[level] = 0;
7182 * helper to process tree block pointer.
7184 * when wc->stage == DROP_REFERENCE, this function checks
7185 * reference count of the block pointed to. if the block
7186 is shared and we need to update back refs for the subtree
7187 * rooted at the block, this function changes wc->stage to
7188 * UPDATE_BACKREF. if the block is shared and there is no
7189 need to update back refs, this function drops the reference
7192 * NOTE: return value 1 means we should stop walking down.
7194 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7195 struct btrfs_root *root,
7196 struct btrfs_path *path,
7197 struct walk_control *wc, int *lookup_info)
7203 struct btrfs_key key;
7204 struct extent_buffer *next;
7205 int level = wc->level;
7209 generation = btrfs_node_ptr_generation(path->nodes[level],
7210 path->slots[level]);
7212 * if the lower level block was created before the snapshot
7213 * was created, we know there is no need to update back refs
7216 if (wc->stage == UPDATE_BACKREF &&
7217 generation <= root->root_key.offset) {
7222 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7223 blocksize = btrfs_level_size(root, level - 1);
7225 next = btrfs_find_tree_block(root, bytenr, blocksize);
7227 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7230 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7234 btrfs_tree_lock(next);
7235 btrfs_set_lock_blocking(next);
7237 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7238 &wc->refs[level - 1],
7239 &wc->flags[level - 1]);
7241 btrfs_tree_unlock(next);
7245 if (unlikely(wc->refs[level - 1] == 0)) {
7246 btrfs_err(root->fs_info, "Missing references.");
7251 if (wc->stage == DROP_REFERENCE) {
7252 if (wc->refs[level - 1] > 1) {
7254 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7257 if (!wc->update_ref ||
7258 generation <= root->root_key.offset)
7261 btrfs_node_key_to_cpu(path->nodes[level], &key,
7262 path->slots[level]);
7263 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7267 wc->stage = UPDATE_BACKREF;
7268 wc->shared_level = level - 1;
7272 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7276 if (!btrfs_buffer_uptodate(next, generation, 0)) {
7277 btrfs_tree_unlock(next);
7278 free_extent_buffer(next);
7284 if (reada && level == 1)
7285 reada_walk_down(trans, root, wc, path);
7286 next = read_tree_block(root, bytenr, blocksize, generation);
7287 if (!next || !extent_buffer_uptodate(next)) {
7288 free_extent_buffer(next);
7291 btrfs_tree_lock(next);
7292 btrfs_set_lock_blocking(next);
7296 BUG_ON(level != btrfs_header_level(next));
7297 path->nodes[level] = next;
7298 path->slots[level] = 0;
7299 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7305 wc->refs[level - 1] = 0;
7306 wc->flags[level - 1] = 0;
7307 if (wc->stage == DROP_REFERENCE) {
7308 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7309 parent = path->nodes[level]->start;
7311 BUG_ON(root->root_key.objectid !=
7312 btrfs_header_owner(path->nodes[level]));
7316 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7317 root->root_key.objectid, level - 1, 0, 0);
7318 BUG_ON(ret); /* -ENOMEM */
7320 btrfs_tree_unlock(next);
7321 free_extent_buffer(next);
7327 * helper to process tree block while walking up the tree.
7329 * when wc->stage == DROP_REFERENCE, this function drops
7330 * reference count on the block.
7332 * when wc->stage == UPDATE_BACKREF, this function changes
7333 * wc->stage back to DROP_REFERENCE if we changed wc->stage
7334 * to UPDATE_BACKREF previously while processing the block.
7336 * NOTE: return value 1 means we should stop walking up.
7338 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7339 struct btrfs_root *root,
7340 struct btrfs_path *path,
7341 struct walk_control *wc)
7344 int level = wc->level;
7345 struct extent_buffer *eb = path->nodes[level];
7348 if (wc->stage == UPDATE_BACKREF) {
7349 BUG_ON(wc->shared_level < level);
7350 if (level < wc->shared_level)
7353 ret = find_next_key(path, level + 1, &wc->update_progress);
7357 wc->stage = DROP_REFERENCE;
7358 wc->shared_level = -1;
7359 path->slots[level] = 0;
7362 * check reference count again if the block isn't locked.
7363 * we should start walking down the tree again if reference
7366 if (!path->locks[level]) {
7368 btrfs_tree_lock(eb);
7369 btrfs_set_lock_blocking(eb);
7370 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7372 ret = btrfs_lookup_extent_info(trans, root,
7373 eb->start, level, 1,
7377 btrfs_tree_unlock_rw(eb, path->locks[level]);
7378 path->locks[level] = 0;
7381 BUG_ON(wc->refs[level] == 0);
7382 if (wc->refs[level] == 1) {
7383 btrfs_tree_unlock_rw(eb, path->locks[level]);
7384 path->locks[level] = 0;
7390 /* wc->stage == DROP_REFERENCE */
7391 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7393 if (wc->refs[level] == 1) {
7395 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7396 ret = btrfs_dec_ref(trans, root, eb, 1,
7399 ret = btrfs_dec_ref(trans, root, eb, 0,
7401 BUG_ON(ret); /* -ENOMEM */
7403 /* make block locked assertion in clean_tree_block happy */
7404 if (!path->locks[level] &&
7405 btrfs_header_generation(eb) == trans->transid) {
7406 btrfs_tree_lock(eb);
7407 btrfs_set_lock_blocking(eb);
7408 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7410 clean_tree_block(trans, root, eb);
7413 if (eb == root->node) {
7414 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7417 BUG_ON(root->root_key.objectid !=
7418 btrfs_header_owner(eb));
7420 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7421 parent = path->nodes[level + 1]->start;
7423 BUG_ON(root->root_key.objectid !=
7424 btrfs_header_owner(path->nodes[level + 1]));
7427 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7429 wc->refs[level] = 0;
7430 wc->flags[level] = 0;
7434 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7435 struct btrfs_root *root,
7436 struct btrfs_path *path,
7437 struct walk_control *wc)
7439 int level = wc->level;
7440 int lookup_info = 1;
7443 while (level >= 0) {
7444 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7451 if (path->slots[level] >=
7452 btrfs_header_nritems(path->nodes[level]))
7455 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7457 path->slots[level]++;
7466 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7467 struct btrfs_root *root,
7468 struct btrfs_path *path,
7469 struct walk_control *wc, int max_level)
7471 int level = wc->level;
7474 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7475 while (level < max_level && path->nodes[level]) {
7477 if (path->slots[level] + 1 <
7478 btrfs_header_nritems(path->nodes[level])) {
7479 path->slots[level]++;
7482 ret = walk_up_proc(trans, root, path, wc);
7486 if (path->locks[level]) {
7487 btrfs_tree_unlock_rw(path->nodes[level],
7488 path->locks[level]);
7489 path->locks[level] = 0;
7491 free_extent_buffer(path->nodes[level]);
7492 path->nodes[level] = NULL;
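/*
 * The tree is torn down by alternating walk_down_tree() and walk_up_tree()
 * from btrfs_drop_snapshot() below; between iterations the current position
 * is recorded in root_item->drop_progress / drop_level so the drop can be
 * resumed after the transaction is ended.
 */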
7500 * drop a subvolume tree.
7502 * this function traverses the tree freeing any blocks that are only
7503 * referenced by the tree.
7505 * when a shared tree block is found, this function decreases its
7506 * reference count by one. if update_ref is true, this function
7507 * also makes sure backrefs for the shared block and all lower level
7508 * blocks are properly updated.
7510 * If called with for_reloc == 0, may exit early with -EAGAIN
7512 int btrfs_drop_snapshot(struct btrfs_root *root,
7513 struct btrfs_block_rsv *block_rsv, int update_ref,
7516 struct btrfs_path *path;
7517 struct btrfs_trans_handle *trans;
7518 struct btrfs_root *tree_root = root->fs_info->tree_root;
7519 struct btrfs_root_item *root_item = &root->root_item;
7520 struct walk_control *wc;
7521 struct btrfs_key key;
7525 bool root_dropped = false;
7527 path = btrfs_alloc_path();
7533 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7535 btrfs_free_path(path);
7540 trans = btrfs_start_transaction(tree_root, 0);
7541 if (IS_ERR(trans)) {
7542 err = PTR_ERR(trans);
7547 trans->block_rsv = block_rsv;
7549 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7550 level = btrfs_header_level(root->node);
7551 path->nodes[level] = btrfs_lock_root_node(root);
7552 btrfs_set_lock_blocking(path->nodes[level]);
7553 path->slots[level] = 0;
7554 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7555 memset(&wc->update_progress, 0,
7556 sizeof(wc->update_progress));
7558 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7559 memcpy(&wc->update_progress, &key,
7560 sizeof(wc->update_progress));
7562 level = root_item->drop_level;
7564 path->lowest_level = level;
7565 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7566 path->lowest_level = 0;
7574 * unlock our path, this is safe because only this
7575 * function is allowed to delete this snapshot
7577 btrfs_unlock_up_safe(path, 0);
7579 level = btrfs_header_level(root->node);
7581 btrfs_tree_lock(path->nodes[level]);
7582 btrfs_set_lock_blocking(path->nodes[level]);
7583 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7585 ret = btrfs_lookup_extent_info(trans, root,
7586 path->nodes[level]->start,
7587 level, 1, &wc->refs[level],
7593 BUG_ON(wc->refs[level] == 0);
7595 if (level == root_item->drop_level)
7598 btrfs_tree_unlock(path->nodes[level]);
7599 path->locks[level] = 0;
7600 WARN_ON(wc->refs[level] != 1);
7606 wc->shared_level = -1;
7607 wc->stage = DROP_REFERENCE;
7608 wc->update_ref = update_ref;
7610 wc->for_reloc = for_reloc;
7611 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7615 ret = walk_down_tree(trans, root, path, wc);
7621 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7628 BUG_ON(wc->stage != DROP_REFERENCE);
7632 if (wc->stage == DROP_REFERENCE) {
7634 btrfs_node_key(path->nodes[level],
7635 &root_item->drop_progress,
7636 path->slots[level]);
7637 root_item->drop_level = level;
7640 BUG_ON(wc->level == 0);
7641 if (btrfs_should_end_transaction(trans, tree_root) ||
7642 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7643 ret = btrfs_update_root(trans, tree_root,
7647 btrfs_abort_transaction(trans, tree_root, ret);
7652 btrfs_end_transaction_throttle(trans, tree_root);
7653 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7654 pr_debug("btrfs: drop snapshot early exit\n");
7659 trans = btrfs_start_transaction(tree_root, 0);
7660 if (IS_ERR(trans)) {
7661 err = PTR_ERR(trans);
7665 trans->block_rsv = block_rsv;
7668 btrfs_release_path(path);
7672 ret = btrfs_del_root(trans, tree_root, &root->root_key);
7674 btrfs_abort_transaction(trans, tree_root, ret);
7678 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7679 ret = btrfs_find_root(tree_root, &root->root_key, path,
7682 btrfs_abort_transaction(trans, tree_root, ret);
7685 } else if (ret > 0) {
7686 /* if we fail to delete the orphan item this time
7687 * around, it'll get picked up the next time.
7689 * The most common failure here is just -ENOENT.
7691 btrfs_del_orphan_item(trans, tree_root,
7692 root->root_key.objectid);
7696 if (root->in_radix) {
7697 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7699 free_extent_buffer(root->node);
7700 free_extent_buffer(root->commit_root);
7701 btrfs_put_fs_root(root);
7703 root_dropped = true;
7705 btrfs_end_transaction_throttle(trans, tree_root);
7708 btrfs_free_path(path);
7711 * So if we need to stop dropping the snapshot for whatever reason we
7712 * need to make sure to add it back to the dead root list so that we
7713 * keep trying to do the work later. This also cleans up roots we
7714 * don't have in the radix (like when we recover after a power fail
7715 * or unmount) so we don't leak memory.
7717 if (!for_reloc && root_dropped == false)
7718 btrfs_add_dead_root(root);
7720 btrfs_std_error(root->fs_info, err);
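/*
 * Note on the early-exit path: when for_reloc == 0 and the cleaner needs to
 * sleep, the loop above ends the transaction and returns -EAGAIN after
 * saving drop_progress; the root is put back on the dead-roots list so the
 * cleaner retries the drop later.
 */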
7725 * drop subtree rooted at tree block 'node'.
7727 * NOTE: this function will unlock and release tree block 'node'.
7728 * only used by the relocation code.
7730 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7731 struct btrfs_root *root,
7732 struct extent_buffer *node,
7733 struct extent_buffer *parent)
7735 struct btrfs_path *path;
7736 struct walk_control *wc;
7742 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7744 path = btrfs_alloc_path();
7748 wc = kzalloc(sizeof(*wc), GFP_NOFS);
7750 btrfs_free_path(path);
7754 btrfs_assert_tree_locked(parent);
7755 parent_level = btrfs_header_level(parent);
7756 extent_buffer_get(parent);
7757 path->nodes[parent_level] = parent;
7758 path->slots[parent_level] = btrfs_header_nritems(parent);
7760 btrfs_assert_tree_locked(node);
7761 level = btrfs_header_level(node);
7762 path->nodes[level] = node;
7763 path->slots[level] = 0;
7764 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7766 wc->refs[parent_level] = 1;
7767 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7769 wc->shared_level = -1;
7770 wc->stage = DROP_REFERENCE;
7774 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7777 wret = walk_down_tree(trans, root, path, wc);
7783 wret = walk_up_tree(trans, root, path, wc, parent_level);
7791 btrfs_free_path(path);
7795 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7801 * if restripe for this chunk_type is on, pick the target profile and
7802 * return; otherwise do the usual balance
7804 stripped = get_restripe_target(root->fs_info, flags);
7806 return extended_to_chunk(stripped);
7809 * we add in the count of missing devices because we want
7810 * to make sure that any RAID levels on a degraded FS
7811 * continue to be honored.
7813 num_devices = root->fs_info->fs_devices->rw_devices +
7814 root->fs_info->fs_devices->missing_devices;
7816 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7817 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7818 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7820 if (num_devices == 1) {
7821 stripped |= BTRFS_BLOCK_GROUP_DUP;
7822 stripped = flags & ~stripped;
7824 /* turn raid0 into single device chunks */
7825 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7828 /* turn mirroring into duplication */
7829 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7830 BTRFS_BLOCK_GROUP_RAID10))
7831 return stripped | BTRFS_BLOCK_GROUP_DUP;
7833 /* they already had raid on here, just return */
7834 if (flags & stripped)
7837 stripped |= BTRFS_BLOCK_GROUP_DUP;
7838 stripped = flags & ~stripped;
7840 /* switch duplicated blocks with raid1 */
7841 if (flags & BTRFS_BLOCK_GROUP_DUP)
7842 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7844 /* this is drive concat, leave it alone */
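/*
 * Example of the conversion above on a filesystem reduced to a single rw
 * device: RAID0 chunks are rewritten as single chunks and RAID1/RAID10
 * chunks become DUP, so two copies of the data are kept on the remaining
 * device.
 */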
7850 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7852 struct btrfs_space_info *sinfo = cache->space_info;
7854 u64 min_allocable_bytes;
7859 * We need some metadata space and system metadata space for
7860 * allocating chunks in some corner cases, so keep that headroom
7861 * unless we are forcing the block group to be read-only.
7864 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7866 min_allocable_bytes = 1 * 1024 * 1024;
7868 min_allocable_bytes = 0;
7870 spin_lock(&sinfo->lock);
7871 spin_lock(&cache->lock);
7878 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7879 cache->bytes_super - btrfs_block_group_used(&cache->item);
7881 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7882 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7883 min_allocable_bytes <= sinfo->total_bytes) {
7884 sinfo->bytes_readonly += num_bytes;
7889 spin_unlock(&cache->lock);
7890 spin_unlock(&sinfo->lock);
7894 int btrfs_set_block_group_ro(struct btrfs_root *root,
7895 struct btrfs_block_group_cache *cache)
7898 struct btrfs_trans_handle *trans;
7904 trans = btrfs_join_transaction(root);
7906 return PTR_ERR(trans);
7908 alloc_flags = update_block_group_flags(root, cache->flags);
7909 if (alloc_flags != cache->flags) {
7910 ret = do_chunk_alloc(trans, root, alloc_flags,
7916 ret = set_block_group_ro(cache, 0);
7919 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7920 ret = do_chunk_alloc(trans, root, alloc_flags,
7924 ret = set_block_group_ro(cache, 0);
7926 btrfs_end_transaction(trans, root);
7930 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7931 struct btrfs_root *root, u64 type)
7933 u64 alloc_flags = get_alloc_profile(root, type);
7934 return do_chunk_alloc(trans, root, alloc_flags,
7939 * helper to account the unused space of all the readonly block groups in the
7940 * list. takes mirrors into account.
7942 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7944 struct btrfs_block_group_cache *block_group;
7948 list_for_each_entry(block_group, groups_list, list) {
7949 spin_lock(&block_group->lock);
7951 if (!block_group->ro) {
7952 spin_unlock(&block_group->lock);
7956 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7957 BTRFS_BLOCK_GROUP_RAID10 |
7958 BTRFS_BLOCK_GROUP_DUP))
7963 free_bytes += (block_group->key.offset -
7964 btrfs_block_group_used(&block_group->item)) *
7967 spin_unlock(&block_group->lock);
7974 * helper to account the unused space of all the readonly block groups in the
7975 * space_info. takes mirrors into account.
7977 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7982 spin_lock(&sinfo->lock);
7984 for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7985 if (!list_empty(&sinfo->block_groups[i]))
7986 free_bytes += __btrfs_get_ro_block_group_free_space(
7987 &sinfo->block_groups[i]);
7989 spin_unlock(&sinfo->lock);
7994 void btrfs_set_block_group_rw(struct btrfs_root *root,
7995 struct btrfs_block_group_cache *cache)
7997 struct btrfs_space_info *sinfo = cache->space_info;
8002 spin_lock(&sinfo->lock);
8003 spin_lock(&cache->lock);
8004 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8005 cache->bytes_super - btrfs_block_group_used(&cache->item);
8006 sinfo->bytes_readonly -= num_bytes;
8008 spin_unlock(&cache->lock);
8009 spin_unlock(&sinfo->lock);
8013 * checks to see if it's even possible to relocate this block group.
8015 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
8016 * ok to go ahead and try.
8018 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8020 struct btrfs_block_group_cache *block_group;
8021 struct btrfs_space_info *space_info;
8022 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8023 struct btrfs_device *device;
8024 struct btrfs_trans_handle *trans;
8033 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8035 /* odd, couldn't find the block group, leave it alone */
8039 min_free = btrfs_block_group_used(&block_group->item);
8041 /* no bytes used, we're good */
8045 space_info = block_group->space_info;
8046 spin_lock(&space_info->lock);
8048 full = space_info->full;
8051 * if this is the last block group we have in this space, we can't
8052 * relocate it unless we're able to allocate a new chunk below.
8054 * Otherwise, we need to make sure we have room in the space to handle
8055 * all of the extents from this block group. If we can, we're good
8057 if ((space_info->total_bytes != block_group->key.offset) &&
8058 (space_info->bytes_used + space_info->bytes_reserved +
8059 space_info->bytes_pinned + space_info->bytes_readonly +
8060 min_free < space_info->total_bytes)) {
8061 spin_unlock(&space_info->lock);
8064 spin_unlock(&space_info->lock);
8067 * ok we don't have enough space, but maybe we have free space on our
8068 * devices to allocate new chunks for relocation, so loop through our
8069 * alloc devices and guess if we have enough space. if this block
8070 * group is going to be restriped, run checks against the target
8071 * profile instead of the current one.
8083 target = get_restripe_target(root->fs_info, block_group->flags);
8085 index = __get_raid_index(extended_to_chunk(target));
8088 * this is just a balance, so if we were marked as full
8089 * we know there is no space for a new chunk
8094 index = get_block_group_index(block_group);
8097 if (index == BTRFS_RAID_RAID10) {
8101 } else if (index == BTRFS_RAID_RAID1) {
8103 } else if (index == BTRFS_RAID_DUP) {
8106 } else if (index == BTRFS_RAID_RAID0) {
8107 dev_min = fs_devices->rw_devices;
8108 do_div(min_free, dev_min);
8111 /* We need to do this so that we can look at pending chunks */
8112 trans = btrfs_join_transaction(root);
8113 if (IS_ERR(trans)) {
8114 ret = PTR_ERR(trans);
8118 mutex_lock(&root->fs_info->chunk_mutex);
8119 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8123 * check to make sure we can actually find a chunk with enough
8124 * space to fit our block group in.
8126 if (device->total_bytes > device->bytes_used + min_free &&
8127 !device->is_tgtdev_for_dev_replace) {
8128 ret = find_free_dev_extent(trans, device, min_free,
8133 if (dev_nr >= dev_min)
8139 mutex_unlock(&root->fs_info->chunk_mutex);
8140 btrfs_end_transaction(trans, root);
8142 btrfs_put_block_group(block_group);
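/*
 * Summary of the checks above: relocation is allowed if the remaining space
 * in this space_info can absorb the used bytes of the block group, or if
 * enough unallocated room exists across the required number of devices
 * (dev_min depends on the RAID profile, possibly the restripe target) to
 * allocate a replacement chunk.
 */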
8146 static int find_first_block_group(struct btrfs_root *root,
8147 struct btrfs_path *path, struct btrfs_key *key)
8150 struct btrfs_key found_key;
8151 struct extent_buffer *leaf;
8154 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8159 slot = path->slots[0];
8160 leaf = path->nodes[0];
8161 if (slot >= btrfs_header_nritems(leaf)) {
8162 ret = btrfs_next_leaf(root, path);
8169 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8171 if (found_key.objectid >= key->objectid &&
8172 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8182 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8184 struct btrfs_block_group_cache *block_group;
8188 struct inode *inode;
8190 block_group = btrfs_lookup_first_block_group(info, last);
8191 while (block_group) {
8192 spin_lock(&block_group->lock);
8193 if (block_group->iref)
8195 spin_unlock(&block_group->lock);
8196 block_group = next_block_group(info->tree_root,
8206 inode = block_group->inode;
8207 block_group->iref = 0;
8208 block_group->inode = NULL;
8209 spin_unlock(&block_group->lock);
8211 last = block_group->key.objectid + block_group->key.offset;
8212 btrfs_put_block_group(block_group);
8216 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8218 struct btrfs_block_group_cache *block_group;
8219 struct btrfs_space_info *space_info;
8220 struct btrfs_caching_control *caching_ctl;
8223 down_write(&info->extent_commit_sem);
8224 while (!list_empty(&info->caching_block_groups)) {
8225 caching_ctl = list_entry(info->caching_block_groups.next,
8226 struct btrfs_caching_control, list);
8227 list_del(&caching_ctl->list);
8228 put_caching_control(caching_ctl);
8230 up_write(&info->extent_commit_sem);
8232 spin_lock(&info->block_group_cache_lock);
8233 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8234 block_group = rb_entry(n, struct btrfs_block_group_cache,
8236 rb_erase(&block_group->cache_node,
8237 &info->block_group_cache_tree);
8238 spin_unlock(&info->block_group_cache_lock);
8240 down_write(&block_group->space_info->groups_sem);
8241 list_del(&block_group->list);
8242 up_write(&block_group->space_info->groups_sem);
8244 if (block_group->cached == BTRFS_CACHE_STARTED)
8245 wait_block_group_cache_done(block_group);
8248 * We haven't cached this block group, which means we could
8249 * possibly have excluded extents on this block group.
8251 if (block_group->cached == BTRFS_CACHE_NO ||
8252 block_group->cached == BTRFS_CACHE_ERROR)
8253 free_excluded_extents(info->extent_root, block_group);
8255 btrfs_remove_free_space_cache(block_group);
8256 btrfs_put_block_group(block_group);
8258 spin_lock(&info->block_group_cache_lock);
8260 spin_unlock(&info->block_group_cache_lock);
8262 /* now that all the block groups are freed, go through and
8263 * free all the space_info structs. This is only called during
8264 * the final stages of unmount, and so we know nobody is
8265 * using them. We call synchronize_rcu() once before we start,
8266 * just to be on the safe side.
8270 release_global_block_rsv(info);
8272 while(!list_empty(&info->space_info)) {
8273 space_info = list_entry(info->space_info.next,
8274 struct btrfs_space_info,
8276 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8277 if (space_info->bytes_pinned > 0 ||
8278 space_info->bytes_reserved > 0 ||
8279 space_info->bytes_may_use > 0) {
8281 dump_space_info(space_info, 0, 0);
8284 percpu_counter_destroy(&space_info->total_bytes_pinned);
8285 list_del(&space_info->list);
8291 static void __link_block_group(struct btrfs_space_info *space_info,
8292 struct btrfs_block_group_cache *cache)
8294 int index = get_block_group_index(cache);
8296 down_write(&space_info->groups_sem);
8297 list_add_tail(&cache->list, &space_info->block_groups[index]);
8298 up_write(&space_info->groups_sem);
8301 int btrfs_read_block_groups(struct btrfs_root *root)
8303 struct btrfs_path *path;
8305 struct btrfs_block_group_cache *cache;
8306 struct btrfs_fs_info *info = root->fs_info;
8307 struct btrfs_space_info *space_info;
8308 struct btrfs_key key;
8309 struct btrfs_key found_key;
8310 struct extent_buffer *leaf;
8314 root = info->extent_root;
8317 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8318 path = btrfs_alloc_path();
8323 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8324 if (btrfs_test_opt(root, SPACE_CACHE) &&
8325 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8327 if (btrfs_test_opt(root, CLEAR_CACHE))
8331 ret = find_first_block_group(root, path, &key);
8336 leaf = path->nodes[0];
8337 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8338 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8343 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8345 if (!cache->free_space_ctl) {
8351 atomic_set(&cache->count, 1);
8352 spin_lock_init(&cache->lock);
8353 cache->fs_info = info;
8354 INIT_LIST_HEAD(&cache->list);
8355 INIT_LIST_HEAD(&cache->cluster_list);
8359 * When we mount with old space cache, we need to
8360 * set BTRFS_DC_CLEAR and set the dirty flag.
8362 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8363 * truncate the old free space cache inode and
8365 * b) Setting 'dirty flag' makes sure that we flush
8366 * the new space cache info onto disk.
8368 cache->disk_cache_state = BTRFS_DC_CLEAR;
8369 if (btrfs_test_opt(root, SPACE_CACHE))
8373 read_extent_buffer(leaf, &cache->item,
8374 btrfs_item_ptr_offset(leaf, path->slots[0]),
8375 sizeof(cache->item));
8376 memcpy(&cache->key, &found_key, sizeof(found_key));
8378 key.objectid = found_key.objectid + found_key.offset;
8379 btrfs_release_path(path);
8380 cache->flags = btrfs_block_group_flags(&cache->item);
8381 cache->sectorsize = root->sectorsize;
8382 cache->full_stripe_len = btrfs_full_stripe_len(root,
8383 &root->fs_info->mapping_tree,
8384 found_key.objectid);
8385 btrfs_init_free_space_ctl(cache);
8388 * We need to exclude the super stripes now so that the space
8389 * info has super bytes accounted for, otherwise we'll think
8390 * we have more space than we actually do.
8392 ret = exclude_super_stripes(root, cache);
8395 * We may have excluded something, so call this just in
8398 free_excluded_extents(root, cache);
8399 kfree(cache->free_space_ctl);
8405 * check for two cases, either we are full, and therefore
8406 * don't need to bother with the caching work since we won't
8407 * find any space, or we are empty, and we can just add all
8408 * the space in and be done with it. This saves us a lot of
8409 * time, particularly in the full case.
8411 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8412 cache->last_byte_to_unpin = (u64)-1;
8413 cache->cached = BTRFS_CACHE_FINISHED;
8414 free_excluded_extents(root, cache);
8415 } else if (btrfs_block_group_used(&cache->item) == 0) {
8416 cache->last_byte_to_unpin = (u64)-1;
8417 cache->cached = BTRFS_CACHE_FINISHED;
8418 add_new_free_space(cache, root->fs_info,
8420 found_key.objectid +
8422 free_excluded_extents(root, cache);
8425 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8427 btrfs_remove_free_space_cache(cache);
8428 btrfs_put_block_group(cache);
8432 ret = update_space_info(info, cache->flags, found_key.offset,
8433 btrfs_block_group_used(&cache->item),
8436 btrfs_remove_free_space_cache(cache);
8437 spin_lock(&info->block_group_cache_lock);
8438 rb_erase(&cache->cache_node,
8439 &info->block_group_cache_tree);
8440 spin_unlock(&info->block_group_cache_lock);
8441 btrfs_put_block_group(cache);
8445 cache->space_info = space_info;
8446 spin_lock(&cache->space_info->lock);
8447 cache->space_info->bytes_readonly += cache->bytes_super;
8448 spin_unlock(&cache->space_info->lock);
8450 __link_block_group(space_info, cache);
8452 set_avail_alloc_bits(root->fs_info, cache->flags);
8453 if (btrfs_chunk_readonly(root, cache->key.objectid))
8454 set_block_group_ro(cache, 1);
8457 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8458 if (!(get_alloc_profile(root, space_info->flags) &
8459 (BTRFS_BLOCK_GROUP_RAID10 |
8460 BTRFS_BLOCK_GROUP_RAID1 |
8461 BTRFS_BLOCK_GROUP_RAID5 |
8462 BTRFS_BLOCK_GROUP_RAID6 |
8463 BTRFS_BLOCK_GROUP_DUP)))
8466 * avoid allocating from un-mirrored block group if there are
8467 * mirrored block groups.
8469 list_for_each_entry(cache,
8470 &space_info->block_groups[BTRFS_RAID_RAID0],
8472 set_block_group_ro(cache, 1);
8473 list_for_each_entry(cache,
8474 &space_info->block_groups[BTRFS_RAID_SINGLE],
8476 set_block_group_ro(cache, 1);
8479 init_global_block_rsv(info);
8482 btrfs_free_path(path);
8486 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8487 struct btrfs_root *root)
8489 struct btrfs_block_group_cache *block_group, *tmp;
8490 struct btrfs_root *extent_root = root->fs_info->extent_root;
8491 struct btrfs_block_group_item item;
8492 struct btrfs_key key;
8495 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8497 list_del_init(&block_group->new_bg_list);
8502 spin_lock(&block_group->lock);
8503 memcpy(&item, &block_group->item, sizeof(item));
8504 memcpy(&key, &block_group->key, sizeof(key));
8505 spin_unlock(&block_group->lock);
8507 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8510 btrfs_abort_transaction(trans, extent_root, ret);
8511 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8512 key.objectid, key.offset);
8514 btrfs_abort_transaction(trans, extent_root, ret);
8518 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8519 struct btrfs_root *root, u64 bytes_used,
8520 u64 type, u64 chunk_objectid, u64 chunk_offset,
8524 struct btrfs_root *extent_root;
8525 struct btrfs_block_group_cache *cache;
8527 extent_root = root->fs_info->extent_root;
8529 root->fs_info->last_trans_log_full_commit = trans->transid;
8531 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8534 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8536 if (!cache->free_space_ctl) {
8541 cache->key.objectid = chunk_offset;
8542 cache->key.offset = size;
8543 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8544 cache->sectorsize = root->sectorsize;
8545 cache->fs_info = root->fs_info;
8546 cache->full_stripe_len = btrfs_full_stripe_len(root,
8547 &root->fs_info->mapping_tree,
8550 atomic_set(&cache->count, 1);
8551 spin_lock_init(&cache->lock);
8552 INIT_LIST_HEAD(&cache->list);
8553 INIT_LIST_HEAD(&cache->cluster_list);
8554 INIT_LIST_HEAD(&cache->new_bg_list);
8556 btrfs_init_free_space_ctl(cache);
8558 btrfs_set_block_group_used(&cache->item, bytes_used);
8559 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8560 cache->flags = type;
8561 btrfs_set_block_group_flags(&cache->item, type);
8563 cache->last_byte_to_unpin = (u64)-1;
8564 cache->cached = BTRFS_CACHE_FINISHED;
8565 ret = exclude_super_stripes(root, cache);
8568 * We may have excluded something, so call this just in
8571 free_excluded_extents(root, cache);
8572 kfree(cache->free_space_ctl);
8577 add_new_free_space(cache, root->fs_info, chunk_offset,
8578 chunk_offset + size);
8580 free_excluded_extents(root, cache);
8582 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8584 btrfs_remove_free_space_cache(cache);
8585 btrfs_put_block_group(cache);
8589 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8590 &cache->space_info);
8592 btrfs_remove_free_space_cache(cache);
8593 spin_lock(&root->fs_info->block_group_cache_lock);
8594 rb_erase(&cache->cache_node,
8595 &root->fs_info->block_group_cache_tree);
8596 spin_unlock(&root->fs_info->block_group_cache_lock);
8597 btrfs_put_block_group(cache);
8600 update_global_block_rsv(root->fs_info);
8602 spin_lock(&cache->space_info->lock);
8603 cache->space_info->bytes_readonly += cache->bytes_super;
8604 spin_unlock(&cache->space_info->lock);
8606 __link_block_group(cache->space_info, cache);
8608 list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8610 set_avail_alloc_bits(extent_root->fs_info, type);
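/*
 * Note: the new block group is only linked into the in-memory structures and
 * queued on trans->new_bgs here; the block group item itself is inserted into
 * the extent tree later by btrfs_create_pending_block_groups() above.
 */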
8615 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8617 u64 extra_flags = chunk_to_extended(flags) &
8618 BTRFS_EXTENDED_PROFILE_MASK;
8620 write_seqlock(&fs_info->profiles_lock);
8621 if (flags & BTRFS_BLOCK_GROUP_DATA)
8622 fs_info->avail_data_alloc_bits &= ~extra_flags;
8623 if (flags & BTRFS_BLOCK_GROUP_METADATA)
8624 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8625 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8626 fs_info->avail_system_alloc_bits &= ~extra_flags;
8627 write_sequnlock(&fs_info->profiles_lock);
8630 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8631 struct btrfs_root *root, u64 group_start)
8633 struct btrfs_path *path;
8634 struct btrfs_block_group_cache *block_group;
8635 struct btrfs_free_cluster *cluster;
8636 struct btrfs_root *tree_root = root->fs_info->tree_root;
8637 struct btrfs_key key;
8638 struct inode *inode;
8643 root = root->fs_info->extent_root;
8645 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8646 BUG_ON(!block_group);
8647 BUG_ON(!block_group->ro);
8650 * Free the reserved super bytes from this block group before
8653 free_excluded_extents(root, block_group);
8655 memcpy(&key, &block_group->key, sizeof(key));
8656 index = get_block_group_index(block_group);
8657 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8658 BTRFS_BLOCK_GROUP_RAID1 |
8659 BTRFS_BLOCK_GROUP_RAID10))
8664 /* make sure this block group isn't part of an allocation cluster */
8665 cluster = &root->fs_info->data_alloc_cluster;
8666 spin_lock(&cluster->refill_lock);
8667 btrfs_return_cluster_to_free_space(block_group, cluster);
8668 spin_unlock(&cluster->refill_lock);
8671 * make sure this block group isn't part of a metadata
8672 * allocation cluster
8674 cluster = &root->fs_info->meta_alloc_cluster;
8675 spin_lock(&cluster->refill_lock);
8676 btrfs_return_cluster_to_free_space(block_group, cluster);
8677 spin_unlock(&cluster->refill_lock);
8679 path = btrfs_alloc_path();
8685 inode = lookup_free_space_inode(tree_root, block_group, path);
8686 if (!IS_ERR(inode)) {
8687 ret = btrfs_orphan_add(trans, inode);
8689 btrfs_add_delayed_iput(inode);
8693 /* One for the block group's ref */
8694 spin_lock(&block_group->lock);
8695 if (block_group->iref) {
8696 block_group->iref = 0;
8697 block_group->inode = NULL;
8698 spin_unlock(&block_group->lock);
8701 spin_unlock(&block_group->lock);
8703 /* One for our lookup ref */
8704 btrfs_add_delayed_iput(inode);
8707 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8708 key.offset = block_group->key.objectid;
8711 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8715 btrfs_release_path(path);
8717 ret = btrfs_del_item(trans, tree_root, path);
8720 btrfs_release_path(path);
8723 spin_lock(&root->fs_info->block_group_cache_lock);
8724 rb_erase(&block_group->cache_node,
8725 &root->fs_info->block_group_cache_tree);
8727 if (root->fs_info->first_logical_byte == block_group->key.objectid)
8728 root->fs_info->first_logical_byte = (u64)-1;
8729 spin_unlock(&root->fs_info->block_group_cache_lock);
8731 down_write(&block_group->space_info->groups_sem);
8733 * we must use list_del_init so people can check to see if they
8734 * are still on the list after taking the semaphore
8736 list_del_init(&block_group->list);
8737 if (list_empty(&block_group->space_info->block_groups[index]))
8738 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8739 up_write(&block_group->space_info->groups_sem);
8741 if (block_group->cached == BTRFS_CACHE_STARTED)
8742 wait_block_group_cache_done(block_group);
8744 btrfs_remove_free_space_cache(block_group);
8746 spin_lock(&block_group->space_info->lock);
8747 block_group->space_info->total_bytes -= block_group->key.offset;
8748 block_group->space_info->bytes_readonly -= block_group->key.offset;
8749 block_group->space_info->disk_total -= block_group->key.offset * factor;
8750 spin_unlock(&block_group->space_info->lock);
8752 memcpy(&key, &block_group->key, sizeof(key));
8754 btrfs_clear_space_info_full(root->fs_info);
8756 btrfs_put_block_group(block_group);
8757 btrfs_put_block_group(block_group);
8759 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8765 ret = btrfs_del_item(trans, root, path);
8767 btrfs_free_path(path);
8771 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8773 struct btrfs_space_info *space_info;
8774 struct btrfs_super_block *disk_super;
8780 disk_super = fs_info->super_copy;
8781 if (!btrfs_super_root(disk_super))
8784 features = btrfs_super_incompat_flags(disk_super);
8785 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8788 flags = BTRFS_BLOCK_GROUP_SYSTEM;
8789 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8794 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8795 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8797 flags = BTRFS_BLOCK_GROUP_METADATA;
8798 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8802 flags = BTRFS_BLOCK_GROUP_DATA;
8803 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8809 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8811 return unpin_extent_range(root, start, end);
8814 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8815 u64 num_bytes, u64 *actual_bytes)
8817 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8820 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8822 struct btrfs_fs_info *fs_info = root->fs_info;
8823 struct btrfs_block_group_cache *cache = NULL;
8828 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8832 * try to trim all FS space, our block group may start from non-zero.
8834 if (range->len == total_bytes)
8835 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8837 cache = btrfs_lookup_block_group(fs_info, range->start);
8840 if (cache->key.objectid >= (range->start + range->len)) {
8841 btrfs_put_block_group(cache);
8845 start = max(range->start, cache->key.objectid);
8846 end = min(range->start + range->len,
8847 cache->key.objectid + cache->key.offset);
8849 if (end - start >= range->minlen) {
8850 if (!block_group_cache_done(cache)) {
8851 ret = cache_block_group(cache, 0);
8853 btrfs_put_block_group(cache);
8856 ret = wait_block_group_cache_done(cache);
8858 btrfs_put_block_group(cache);
8862 ret = btrfs_trim_block_group(cache,
8868 trimmed += group_trimmed;
8870 btrfs_put_block_group(cache);
8875 cache = next_block_group(fs_info->tree_root, cache);
8878 range->len = trimmed;