Btrfs: mark delayed refs as for cow
fs/btrfs/extent-tree.c
index 30c0558eae845fee8078988af186b800b1917179..dc8b9a834596470522f02b070039543cf6ead0a8 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -466,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_root *root,
                             int load_cache_only)
 {
+       DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;
 
-       smp_mb();
-       if (cache->cached != BTRFS_CACHE_NO)
+       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+       BUG_ON(!caching_ctl);
+
+       INIT_LIST_HEAD(&caching_ctl->list);
+       mutex_init(&caching_ctl->mutex);
+       init_waitqueue_head(&caching_ctl->wait);
+       caching_ctl->block_group = cache;
+       caching_ctl->progress = cache->key.objectid;
+       atomic_set(&caching_ctl->count, 1);
+       caching_ctl->work.func = caching_thread;
+
+       spin_lock(&cache->lock);
+       /*
+        * This should be a rare occasion, but I think it could happen in the
+        * case where one thread starts to load the space cache info, and then
+        * some other thread starts a transaction commit which tries to do an
+        * allocation while the other thread is still loading the space cache
+        * info.  The previous loop should have kept us from choosing this block
+        * group, but if we've moved to the state where we will wait on caching
+        * block groups we need to first check if we're doing a fast load here,
+        * so we can wait for it to finish, otherwise we could end up allocating
+        * from a block group whose cache gets evicted for one reason or
+        * another.
+        */
+       while (cache->cached == BTRFS_CACHE_FAST) {
+               struct btrfs_caching_control *ctl;
+
+               ctl = cache->caching_ctl;
+               atomic_inc(&ctl->count);
+               prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&cache->lock);
+
+               schedule();
+
+               finish_wait(&ctl->wait, &wait);
+               put_caching_control(ctl);
+               spin_lock(&cache->lock);
+       }
+
+       if (cache->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&cache->lock);
+               kfree(caching_ctl);
                return 0;
+       }
+       WARN_ON(cache->caching_ctl);
+       cache->caching_ctl = caching_ctl;
+       cache->cached = BTRFS_CACHE_FAST;
+       spin_unlock(&cache->lock);
 
        /*
         * We can't do the read from on-disk cache during a commit since we need
@@ -481,57 +528,53 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
-           (root && root != root->fs_info->tree_root)) {
-               spin_lock(&cache->lock);
-               if (cache->cached != BTRFS_CACHE_NO) {
-                       spin_unlock(&cache->lock);
-                       return 0;
-               }
-               cache->cached = BTRFS_CACHE_STARTED;
-               spin_unlock(&cache->lock);
-
+           (root && root != root->fs_info->tree_root) &&
+           btrfs_test_opt(root, SPACE_CACHE)) {
                ret = load_free_space_cache(fs_info, cache);
 
                spin_lock(&cache->lock);
                if (ret == 1) {
+                       cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
-                       cache->cached = BTRFS_CACHE_NO;
+                       if (load_cache_only) {
+                               cache->caching_ctl = NULL;
+                               cache->cached = BTRFS_CACHE_NO;
+                       } else {
+                               cache->cached = BTRFS_CACHE_STARTED;
+                       }
                }
                spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
                if (ret == 1) {
+                       put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
+       } else {
+               /*
+                * We are not going to do the fast caching, set cached to the
+                * appropriate value and wakeup any waiters.
+                */
+               spin_lock(&cache->lock);
+               if (load_cache_only) {
+                       cache->caching_ctl = NULL;
+                       cache->cached = BTRFS_CACHE_NO;
+               } else {
+                       cache->cached = BTRFS_CACHE_STARTED;
+               }
+               spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
        }
 
-       if (load_cache_only)
-               return 0;
-
-       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-       BUG_ON(!caching_ctl);
-
-       INIT_LIST_HEAD(&caching_ctl->list);
-       mutex_init(&caching_ctl->mutex);
-       init_waitqueue_head(&caching_ctl->wait);
-       caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->key.objectid;
-       /* one for caching kthread, one for caching block group list */
-       atomic_set(&caching_ctl->count, 2);
-       caching_ctl->work.func = caching_thread;
-
-       spin_lock(&cache->lock);
-       if (cache->cached != BTRFS_CACHE_NO) {
-               spin_unlock(&cache->lock);
-               kfree(caching_ctl);
+       if (load_cache_only) {
+               put_caching_control(caching_ctl);
                return 0;
        }
-       cache->caching_ctl = caching_ctl;
-       cache->cached = BTRFS_CACHE_STARTED;
-       spin_unlock(&cache->lock);
 
        down_write(&fs_info->extent_commit_sem);
+       atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);
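
A note on the new wait loop at the top of cache_block_group(): any thread that finds the block group in BTRFS_CACHE_FAST takes a reference on the caching control and sleeps until the fast space-cache load either finishes or falls back, rechecking the state under cache->lock after every wakeup. A minimal user-space analogue of that pattern, using a pthread condition variable in place of prepare_to_wait()/finish_wait() (the state names and the loader thread are hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

    struct cache {
            pthread_mutex_t lock;   /* stands in for cache->lock */
            pthread_cond_t wait;    /* stands in for caching_ctl->wait */
            enum cache_state cached;
    };

    /* Sleep until the fast-load phase is over; as in the kernel loop,
     * the state is rechecked under the lock after every wakeup. */
    static void wait_for_fast_load(struct cache *c)
    {
            pthread_mutex_lock(&c->lock);
            while (c->cached == CACHE_FAST)
                    pthread_cond_wait(&c->wait, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    static void *loader(void *arg)
    {
            struct cache *c = arg;

            pthread_mutex_lock(&c->lock);
            c->cached = CACHE_FINISHED;        /* fast load done */
            pthread_cond_broadcast(&c->wait);  /* wake_up(&caching_ctl->wait) */
            pthread_mutex_unlock(&c->lock);
            return NULL;
    }

    int main(void)
    {
            struct cache c = { PTHREAD_MUTEX_INITIALIZER,
                               PTHREAD_COND_INITIALIZER, CACHE_FAST };
            pthread_t t;

            pthread_create(&t, NULL, loader, &c);
            wait_for_fast_load(&c);
            pthread_join(t, NULL);
            printf("cache state: %d\n", c.cached);
            return 0;
    }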
 
@@ -1786,18 +1829,18 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 {
        int ret;
        u64 discarded_bytes = 0;
-       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_bio *bbio = NULL;
 
 
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-                             bytenr, &num_bytes, &multi, 0);
+                             bytenr, &num_bytes, &bbio, 0);
        if (!ret) {
-               struct btrfs_bio_stripe *stripe = multi->stripes;
+               struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;
 
 
-               for (i = 0; i < multi->num_stripes; i++, stripe++) {
+               for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        if (!stripe->dev->can_discard)
                                continue;
 
@@ -1816,7 +1859,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                         */
                        ret = 0;
                }
-               kfree(multi);
+               kfree(bbio);
        }
 
        if (actual_bytes)
@@ -1829,20 +1872,24 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset)
+                        u64 root_objectid, u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
        BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
               root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, owner, offset,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        }
        return ret;
 }
@@ -2362,7 +2409,8 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;
 
-       ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
+       ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+                                         num_bytes, extent_op);
        if (ret)
                kfree(extent_op);
        return ret;
@@ -2547,7 +2595,7 @@ out:
 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          int full_backref, int inc)
+                          int full_backref, int inc, int for_cow)
 {
        u64 bytenr;
        u64 num_bytes;
@@ -2560,7 +2608,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
        int level;
        int ret = 0;
        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
-                           u64, u64, u64, u64, u64, u64);
+                           u64, u64, u64, u64, u64, u64, int);
 
        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
@@ -2597,14 +2645,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,
-                                          key.offset);
+                                          key.offset, for_cow);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
-                                          parent, ref_root, level - 1, 0);
+                                          parent, ref_root, level - 1, 0,
+                                          for_cow);
                        if (ret)
                                goto fail;
                }
@@ -2616,15 +2665,15 @@ fail:
 }
 
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
 }
 
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
 }
 
 static int write_one_cache_group(struct btrfs_trans_handle *trans,
@@ -2716,6 +2765,13 @@ again:
                goto again;
        }
 
+       /* We've already setup this transaction, go ahead and exit */
+       if (block_group->cache_generation == trans->transid &&
+           i_size_read(inode)) {
+               dcs = BTRFS_DC_SETUP;
+               goto out_put;
+       }
+
        /*
         * We want to set the generation to 0, that way if anything goes wrong
         * from here on out we know not to trust this cache when we load up next
@@ -2765,12 +2821,15 @@ again:
        if (!ret)
                dcs = BTRFS_DC_SETUP;
        btrfs_free_reserved_data_space(inode, num_pages);
+
 out_put:
        iput(inode);
 out_free:
        btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
+       if (!ret)
+               block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
 
@@ -3197,7 +3256,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
         * about 1% of the FS size.
         */
        if (force == CHUNK_ALLOC_LIMITED) {
-               thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+               thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
                thresh = max_t(u64, 64 * 1024 * 1024,
                               div_factor_fine(thresh, 1));
 
@@ -3219,7 +3278,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
        if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
                return 0;
 
-       thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+       thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
        /* 256MB or 5% of the FS */
        thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
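
Both thresholds above lean on btrfs' div_factor helpers: div_factor() takes its factor in tenths, div_factor_fine() in percent. A rough user-space sketch of that arithmetic, assuming those usual definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* factor in tenths: div_factor(x, 8) is 80% of x */
    static uint64_t div_factor(uint64_t num, int factor)
    {
            return num * factor / 10;
    }

    /* factor in percent: div_factor_fine(x, 5) is 5% of x */
    static uint64_t div_factor_fine(uint64_t num, int factor)
    {
            return num * factor / 100;
    }

    int main(void)
    {
            uint64_t total = 100ULL << 30;  /* a 100 GiB filesystem */
            uint64_t thresh = div_factor_fine(total, 5);

            if (thresh < 256ULL << 20)
                    thresh = 256ULL << 20;  /* 256MB or 5%, whichever is larger */
            printf("chunk-alloc threshold: %llu bytes\n",
                   (unsigned long long)thresh);
            return 0;
    }
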
@@ -3322,19 +3381,21 @@ out:
 /*
  * shrink metadata reservation for delalloc
  */
-static int shrink_delalloc(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root, u64 to_reclaim, int sync)
+static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
+                          bool wait_ordered)
 {
        struct btrfs_block_rsv *block_rsv;
        struct btrfs_space_info *space_info;
+       struct btrfs_trans_handle *trans;
        u64 reserved;
        u64 max_reclaim;
        u64 reclaimed = 0;
        long time_left;
-       int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+       unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
        int loops = 0;
        unsigned long progress;
 
+       trans = (struct btrfs_trans_handle *)current->journal_info;
        block_rsv = &root->fs_info->delalloc_block_rsv;
        space_info = block_rsv->space_info;
 
@@ -3354,7 +3415,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        }
 
        max_reclaim = min(reserved, to_reclaim);
-
+       nr_pages = max_t(unsigned long, nr_pages,
+                        max_reclaim >> PAGE_CACHE_SHIFT);
        while (loops < 1024) {
                /* have the flusher threads jump in and do some IO */
                smp_mb();
@@ -3376,11 +3438,15 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                if (trans && trans->transaction->blocked)
                        return -EAGAIN;
 
-               time_left = schedule_timeout_interruptible(1);
+               if (wait_ordered && !trans) {
+                       btrfs_wait_ordered_extents(root, 0, 0);
+               } else {
+                       time_left = schedule_timeout_interruptible(1);
 
-               /* We were interrupted, exit */
-               if (time_left)
-                       break;
+                       /* We were interrupted, exit */
+                       if (time_left)
+                               break;
+               }
 
                /* we've kicked the IO a few times, if anything has been freed,
                 * exit.  There is no sense in looping here for a long time
@@ -3395,34 +3461,91 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                }
 
        }
-       if (reclaimed >= to_reclaim && !trans)
-               btrfs_wait_ordered_extents(root, 0, 0);
+
        return reclaimed >= to_reclaim;
 }
 
-/*
- * Retries tells us how many times we've called reserve_metadata_bytes.  The
- * idea is if this is the first call (retries == 0) then we will add to our
- * reserved count if we can't make the allocation in order to hold our place
- * while we go and try and free up space.  That way for retries > 1 we don't try
- * and add space, we just check to see if the amount of unused space is >= the
- * total space, meaning that our reservation is valid.
+/**
+ * may_commit_transaction - possibly commit the transaction if it's ok to
+ * @root - the root we're allocating for
+ * @bytes - the number of bytes we want to reserve
+ * @force - force the commit
  *
- * However if we don't intend to retry this reservation, pass -1 as retries so
- * that it short circuits this logic.
+ * This will check to make sure that committing the transaction will actually
+ * get us somewhere and then commit the transaction if it does.  Otherwise it
+ * will return -ENOSPC.
  */
-static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
-                                 struct btrfs_root *root,
+static int may_commit_transaction(struct btrfs_root *root,
+                                 struct btrfs_space_info *space_info,
+                                 u64 bytes, int force)
+{
+       struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+       struct btrfs_trans_handle *trans;
+
+       trans = (struct btrfs_trans_handle *)current->journal_info;
+       if (trans)
+               return -EAGAIN;
+
+       if (force)
+               goto commit;
+
+       /* See if there is enough pinned space to make this reservation */
+       spin_lock(&space_info->lock);
+       if (space_info->bytes_pinned >= bytes) {
+               spin_unlock(&space_info->lock);
+               goto commit;
+       }
+       spin_unlock(&space_info->lock);
+
+       /*
+        * See if there is some space in the delayed insertion reservation for
+        * this reservation.
+        */
+       if (space_info != delayed_rsv->space_info)
+               return -ENOSPC;
+
+       spin_lock(&delayed_rsv->lock);
+       if (delayed_rsv->size < bytes) {
+               spin_unlock(&delayed_rsv->lock);
+               return -ENOSPC;
+       }
+       spin_unlock(&delayed_rsv->lock);
+
+commit:
+       trans = btrfs_join_transaction(root);
+       if (IS_ERR(trans))
+               return -ENOSPC;
+
+       return btrfs_commit_transaction(trans, root);
+}
+
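may_commit_transaction() only joins and commits when a commit can plausibly free enough space: either the pinned bytes already cover the request, or the delayed-insertion reservation backed by the same space_info does. A compact sketch of that decision ladder (the standalone form and parameter names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool commit_would_help(uint64_t bytes_pinned,
                                  uint64_t delayed_rsv_size,
                                  bool same_space_info,
                                  uint64_t bytes_needed)
    {
            if (bytes_pinned >= bytes_needed)
                    return true;    /* committing unpins enough space */
            if (!same_space_info)
                    return false;   /* delayed rsv can't help this space_info */
            return delayed_rsv_size >= bytes_needed;
    }

    int main(void)
    {
            printf("%d\n", commit_would_help(1ULL << 20, 8ULL << 20,
                                             true, 4ULL << 20));
            return 0;
    }
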
+/**
+ * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
+ * @root - the root we're allocating for
+ * @block_rsv - the block_rsv we're allocating for
+ * @orig_bytes - the number of bytes we want
+ * @flush - whether or not we can flush to make our reservation
+ *
+ * This will reserve orig_bytes bytes from the space info associated
+ * with the block_rsv.  If there is not enough space it will make an attempt to
+ * flush out space to make room.  It will do this by flushing delalloc if
+ * possible or committing the transaction.  If flush is 0 then no attempts to
+ * regain reservations will be made and this will fail if there is not enough
+ * space already.
+ */
+static int reserve_metadata_bytes(struct btrfs_root *root,
                                  struct btrfs_block_rsv *block_rsv,
                                  u64 orig_bytes, int flush)
 {
        struct btrfs_space_info *space_info = block_rsv->space_info;
-       u64 unused;
+       u64 used;
        u64 num_bytes = orig_bytes;
        int retries = 0;
        int ret = 0;
        bool committed = false;
        bool flushing = false;
+       bool wait_ordered = false;
+
 again:
        ret = 0;
        spin_lock(&space_info->lock);
@@ -3438,7 +3561,7 @@ again:
                 * deadlock since we are waiting for the flusher to finish, but
                 * hold the current transaction open.
                 */
-               if (trans)
+               if (current->journal_info)
                        return -EAGAIN;
                ret = wait_event_interruptible(space_info->wait,
                                               !space_info->flush);
@@ -3450,9 +3573,9 @@ again:
        }
 
        ret = -ENOSPC;
-       unused = space_info->bytes_used + space_info->bytes_reserved +
-                space_info->bytes_pinned + space_info->bytes_readonly +
-                space_info->bytes_may_use;
+       used = space_info->bytes_used + space_info->bytes_reserved +
+               space_info->bytes_pinned + space_info->bytes_readonly +
+               space_info->bytes_may_use;
 
        /*
         * The idea here is that we've not already over-reserved the block group
@@ -3461,9 +3584,8 @@ again:
         * lets start flushing stuff first and then come back and try to make
         * our reservation.
         */
-       if (unused <= space_info->total_bytes) {
-               unused = space_info->total_bytes - unused;
-               if (unused >= num_bytes) {
+       if (used <= space_info->total_bytes) {
+               if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
                        ret = 0;
                } else {
@@ -3480,10 +3602,64 @@ again:
                 * amount plus the amount of bytes that we need for this
                 * reservation.
                 */
-               num_bytes = unused - space_info->total_bytes +
+               wait_ordered = true;
+               num_bytes = used - space_info->total_bytes +
                        (orig_bytes * (retries + 1));
        }
 
+       if (ret) {
+               u64 profile = btrfs_get_alloc_profile(root, 0);
+               u64 avail;
+
+               /*
+                * If we have a lot of space that's pinned, don't bother doing
+                * the overcommit dance yet and just commit the transaction.
+                */
+               avail = (space_info->total_bytes - space_info->bytes_used) * 8;
+               do_div(avail, 10);
+               if (space_info->bytes_pinned >= avail && flush && !committed) {
+                       space_info->flush = 1;
+                       flushing = true;
+                       spin_unlock(&space_info->lock);
+                       ret = may_commit_transaction(root, space_info,
+                                                    orig_bytes, 1);
+                       if (ret)
+                               goto out;
+                       committed = true;
+                       goto again;
+               }
+
+               spin_lock(&root->fs_info->free_chunk_lock);
+               avail = root->fs_info->free_chunk_space;
+
+               /*
+                * If we have dup, raid1 or raid10 then only half of the free
+                * space is actually useable.
+                */
+               if (profile & (BTRFS_BLOCK_GROUP_DUP |
+                              BTRFS_BLOCK_GROUP_RAID1 |
+                              BTRFS_BLOCK_GROUP_RAID10))
+                       avail >>= 1;
+
+               /*
+                * If we aren't flushing don't let us overcommit too much, say
+                * 1/8th of the space.  If we can flush, let it overcommit up to
+                * 1/2 of the space.
+                */
+               if (flush)
+                       avail >>= 3;
+               else
+                       avail >>= 1;
+                spin_unlock(&root->fs_info->free_chunk_lock);
+
+               if (used + num_bytes < space_info->total_bytes + avail) {
+                       space_info->bytes_may_use += orig_bytes;
+                       ret = 0;
+               } else {
+                       wait_ordered = true;
+               }
+       }
+
        /*
         * Couldn't make our reservation, save our place so while we're trying
         * to reclaim space we can actually use it instead of somebody else
@@ -3503,7 +3679,7 @@ again:
         * We do synchronous shrinking since we don't actually unreserve
         * metadata until after the IO is completed.
         */
-       ret = shrink_delalloc(trans, root, num_bytes, 1);
+       ret = shrink_delalloc(root, num_bytes, wait_ordered);
        if (ret < 0)
                goto out;
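
The overcommit branch added above lets a reservation succeed past total_bytes when enough unallocated chunk space remains: mirrored profiles (DUP/RAID1/RAID10) halve the usable amount, which is then shifted by 3 or 1 depending on flush, exactly as the hunk commits it. A user-space sketch of that math (the flag values are hypothetical stand-ins for the BTRFS_BLOCK_GROUP_* bits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BG_DUP    (1ULL << 0)  /* hypothetical stand-ins for the */
    #define BG_RAID1  (1ULL << 1)  /* BTRFS_BLOCK_GROUP_* profile bits */
    #define BG_RAID10 (1ULL << 2)

    static uint64_t overcommit_allowance(uint64_t free_chunk_space,
                                         uint64_t profile, bool flush)
    {
            uint64_t avail = free_chunk_space;

            /* dup/raid1/raid10: only half the free space is usable */
            if (profile & (BG_DUP | BG_RAID1 | BG_RAID10))
                    avail >>= 1;

            if (flush)
                    avail >>= 3;
            else
                    avail >>= 1;
            return avail;
    }

    int main(void)
    {
            uint64_t total = 10ULL << 30, used = 10ULL << 30;  /* full */
            uint64_t num_bytes = 1ULL << 20;
            uint64_t avail = overcommit_allowance(4ULL << 30, BG_RAID1, true);

            /* mirrors: used + num_bytes < total_bytes + avail */
            printf("overcommit ok: %d\n", used + num_bytes < total + avail);
            return 0;
    }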
 
@@ -3515,35 +3691,17 @@ again:
         * so go back around and try again.
         */
        if (retries < 2) {
+               wait_ordered = true;
                retries++;
                goto again;
        }
 
-       /*
-        * Not enough space to be reclaimed, don't bother committing the
-        * transaction.
-        */
-       spin_lock(&space_info->lock);
-       if (space_info->bytes_pinned < orig_bytes)
-               ret = -ENOSPC;
-       spin_unlock(&space_info->lock);
-       if (ret)
-               goto out;
-
-       ret = -EAGAIN;
-       if (trans)
-               goto out;
-
        ret = -ENOSPC;
        if (committed)
                goto out;
 
-       trans = btrfs_join_transaction(root);
-       if (IS_ERR(trans))
-               goto out;
-       ret = btrfs_commit_transaction(trans, root);
+       ret = may_commit_transaction(root, space_info, orig_bytes, 0);
        if (!ret) {
-               trans = NULL;
                committed = true;
                goto again;
        }
@@ -3561,10 +3719,12 @@ out:
 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root)
 {
-       struct btrfs_block_rsv *block_rsv;
-       if (root->ref_cows)
+       struct btrfs_block_rsv *block_rsv = NULL;
+
+       if (root->ref_cows || root == root->fs_info->csum_root)
                block_rsv = trans->block_rsv;
-       else
+
+       if (!block_rsv)
                block_rsv = root->block_rsv;
 
        if (!block_rsv)
@@ -3659,8 +3819,6 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
 {
        memset(rsv, 0, sizeof(*rsv));
        spin_lock_init(&rsv->lock);
-       atomic_set(&rsv->usage, 1);
-       rsv->priority = 6;
 }
 
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
@@ -3681,23 +3839,20 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
 void btrfs_free_block_rsv(struct btrfs_root *root,
                          struct btrfs_block_rsv *rsv)
 {
-       if (rsv && atomic_dec_and_test(&rsv->usage)) {
-               btrfs_block_rsv_release(root, rsv, (u64)-1);
-               kfree(rsv);
-       }
+       btrfs_block_rsv_release(root, rsv, (u64)-1);
+       kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root,
-                       struct btrfs_block_rsv *block_rsv,
-                       u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+                                 struct btrfs_block_rsv *block_rsv,
+                                 u64 num_bytes, int flush)
 {
        int ret;
 
        if (num_bytes == 0)
                return 0;
 
-       ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
@@ -3706,63 +3861,80 @@ int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
-                         struct btrfs_root *root,
-                         struct btrfs_block_rsv *block_rsv,
-                         u64 min_reserved, int min_factor)
+int btrfs_block_rsv_add(struct btrfs_root *root,
+                       struct btrfs_block_rsv *block_rsv,
+                       u64 num_bytes)
+{
+       return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
+int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+                               struct btrfs_block_rsv *block_rsv,
+                               u64 num_bytes)
+{
+       return __block_rsv_add(root, block_rsv, num_bytes, 0);
+}
+
+int btrfs_block_rsv_check(struct btrfs_root *root,
+                         struct btrfs_block_rsv *block_rsv, int min_factor)
 {
        u64 num_bytes = 0;
-       int commit_trans = 0;
        int ret = -ENOSPC;
 
        if (!block_rsv)
                return 0;
 
        spin_lock(&block_rsv->lock);
-       if (min_factor > 0)
-               num_bytes = div_factor(block_rsv->size, min_factor);
-       if (min_reserved > num_bytes)
-               num_bytes = min_reserved;
+       num_bytes = div_factor(block_rsv->size, min_factor);
+       if (block_rsv->reserved >= num_bytes)
+               ret = 0;
+       spin_unlock(&block_rsv->lock);
 
-       if (block_rsv->reserved >= num_bytes) {
+       return ret;
+}
+
+static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
+                                          struct btrfs_block_rsv *block_rsv,
+                                          u64 min_reserved, int flush)
+{
+       u64 num_bytes = 0;
+       int ret = -ENOSPC;
+
+       if (!block_rsv)
+               return 0;
+
+       spin_lock(&block_rsv->lock);
+       num_bytes = min_reserved;
+       if (block_rsv->reserved >= num_bytes)
                ret = 0;
-       } else {
+       else
                num_bytes -= block_rsv->reserved;
-               commit_trans = 1;
-       }
        spin_unlock(&block_rsv->lock);
+
        if (!ret)
                return 0;
 
-       if (block_rsv->refill_used) {
-               ret = reserve_metadata_bytes(trans, root, block_rsv,
-                                            num_bytes, 0);
-               if (!ret) {
-                       block_rsv_add_bytes(block_rsv, num_bytes, 0);
-                       return 0;
-               }
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
+       if (!ret) {
+               block_rsv_add_bytes(block_rsv, num_bytes, 0);
+               return 0;
        }
 
-       if (commit_trans) {
-               struct btrfs_space_info *sinfo = block_rsv->space_info;
-
-               if (trans)
-                       return -EAGAIN;
-
-               spin_lock(&sinfo->lock);
-               if (sinfo->bytes_pinned < num_bytes) {
-                       spin_unlock(&sinfo->lock);
-                       return -ENOSPC;
-               }
-               spin_unlock(&sinfo->lock);
+       return ret;
+}
 
-               trans = btrfs_join_transaction(root);
-               BUG_ON(IS_ERR(trans));
-               ret = btrfs_commit_transaction(trans, root);
-               return 0;
-       }
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+                          struct btrfs_block_rsv *block_rsv,
+                          u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
+}
 
-       return -ENOSPC;
+int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
+                                  struct btrfs_block_rsv *block_rsv,
+                                  u64 min_reserved)
+{
+       return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
 }
 
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
@@ -3794,7 +3966,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
        u64 num_bytes;
        u64 meta_used;
        u64 data_used;
-       int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+       int csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
        spin_lock(&sinfo->lock);
@@ -3859,16 +4031,13 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
 
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        fs_info->chunk_block_rsv.space_info = space_info;
-       fs_info->chunk_block_rsv.priority = 10;
 
        space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
        fs_info->global_block_rsv.space_info = space_info;
-       fs_info->global_block_rsv.priority = 10;
-       fs_info->global_block_rsv.refill_used = 1;
        fs_info->delalloc_block_rsv.space_info = space_info;
        fs_info->trans_block_rsv.space_info = space_info;
        fs_info->empty_block_rsv.space_info = space_info;
-       fs_info->empty_block_rsv.priority = 10;
+       fs_info->delayed_block_rsv.space_info = space_info;
 
        fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
        fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
@@ -3888,37 +4057,8 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
        WARN_ON(fs_info->trans_block_rsv.reserved > 0);
        WARN_ON(fs_info->chunk_block_rsv.size > 0);
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
-}
-
-int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *root,
-                                   struct btrfs_block_rsv *rsv)
-{
-       struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
-       u64 num_bytes;
-       int ret;
-
-       /*
-        * Truncate should be freeing data, but give us 2 items just in case it
-        * needs to use some space.  We may want to be smarter about this in the
-        * future.
-        */
-       num_bytes = btrfs_calc_trans_metadata_size(root, 2);
-
-       /* We already have enough bytes, just return */
-       if (rsv->reserved >= num_bytes)
-               return 0;
-
-       num_bytes -= rsv->reserved;
-
-       /*
-        * You should have reserved enough space before hand to do this, so this
-        * should not fail.
-        */
-       ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
-       BUG_ON(ret);
-
-       return 0;
+       WARN_ON(fs_info->delayed_block_rsv.size > 0);
+       WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
 }
 
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
@@ -3927,9 +4067,7 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
        if (!trans->bytes_reserved)
                return;
 
-       BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
-       btrfs_block_rsv_release(root, trans->block_rsv,
-                               trans->bytes_reserved);
+       btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
 }
 
@@ -3982,23 +4120,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+       unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
 
        BUG_ON(!BTRFS_I(inode)->outstanding_extents);
        BTRFS_I(inode)->outstanding_extents--;
 
+       if (BTRFS_I(inode)->outstanding_extents == 0 &&
+           BTRFS_I(inode)->delalloc_meta_reserved) {
+               drop_inode_space = 1;
+               BTRFS_I(inode)->delalloc_meta_reserved = 0;
+       }
+
        /*
         * If we have at least as many outstanding extents as reserved
         * extents then we need to leave the reserved extents count alone.
         */
        if (BTRFS_I(inode)->outstanding_extents >=
            BTRFS_I(inode)->reserved_extents)
-               return 0;
+               return drop_inode_space;
 
        dropped_extents = BTRFS_I(inode)->reserved_extents -
                BTRFS_I(inode)->outstanding_extents;
        BTRFS_I(inode)->reserved_extents -= dropped_extents;
-       return dropped_extents;
+       return dropped_extents + drop_inode_space;
 }
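
drop_outstanding_extent() now also hands back the inode-update item once the last outstanding extent drains, on top of any excess reserved extents. A self-contained sketch of the same accounting (the struct and sample values are illustrative):

    #include <stdio.h>

    struct inode_acct {
            unsigned outstanding_extents;
            unsigned reserved_extents;
            int delalloc_meta_reserved;
    };

    /* Returns how many reserved items the caller may release after one
     * extent completes, including the inode-update item when the last
     * outstanding extent finishes. */
    static unsigned drop_one_extent(struct inode_acct *i)
    {
            unsigned drop_inode_space = 0, dropped = 0;

            i->outstanding_extents--;
            if (i->outstanding_extents == 0 && i->delalloc_meta_reserved) {
                    drop_inode_space = 1;
                    i->delalloc_meta_reserved = 0;
            }
            if (i->outstanding_extents < i->reserved_extents) {
                    dropped = i->reserved_extents - i->outstanding_extents;
                    i->reserved_extents -= dropped;
            }
            return dropped + drop_inode_space;
    }

    int main(void)
    {
            struct inode_acct a = { .outstanding_extents = 1,
                                    .reserved_extents = 2,
                                    .delalloc_meta_reserved = 1 };

            printf("released %u reservations\n", drop_one_extent(&a));
            return 0;
    }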
 
 /**
@@ -4065,9 +4210,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
        unsigned nr_extents = 0;
+       int flush = 1;
        int ret;
 
-       if (btrfs_transaction_in_commit(root->fs_info))
+       if (btrfs_is_free_space_inode(root, inode))
+               flush = 0;
+
+       if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
@@ -4080,24 +4229,41 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
                BTRFS_I(inode)->reserved_extents += nr_extents;
+       }
 
-               to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+       /*
+        * Add an item to reserve for updating the inode when we complete the
+        * delalloc io.
+        */
+       if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+               nr_extents++;
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
        }
+
+       to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
        spin_unlock(&BTRFS_I(inode)->lock);
 
-       ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
        if (ret) {
+               u64 to_free = 0;
                unsigned dropped;
-               /*
-                * We don't need the return value since our reservation failed,
-                * we just need to clean up our counter.
-                */
+
                spin_lock(&BTRFS_I(inode)->lock);
                dropped = drop_outstanding_extent(inode);
-               WARN_ON(dropped > 1);
-               BTRFS_I(inode)->csum_bytes -= num_bytes;
+               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
                spin_unlock(&BTRFS_I(inode)->lock);
+               to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
+               /*
+                * Somebody could have come in and twiddled with the
+                * reservation, so if we have to free more than we would have
+                * reserved from this reservation go ahead and release those
+                * bytes.
+                */
+               to_free -= to_reserve;
+               if (to_free)
+                       btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
 
@@ -4198,12 +4364,12 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 
        /* block accounting for super block */
        spin_lock(&info->delalloc_lock);
-       old_val = btrfs_super_bytes_used(&info->super_copy);
+       old_val = btrfs_super_bytes_used(info->super_copy);
        if (alloc)
                old_val += num_bytes;
        else
                old_val -= num_bytes;
-       btrfs_set_super_bytes_used(&info->super_copy, old_val);
+       btrfs_set_super_bytes_used(info->super_copy, old_val);
        spin_unlock(&info->delalloc_lock);
 
        while (total) {
@@ -4231,7 +4397,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                spin_lock(&cache->space_info->lock);
                spin_lock(&cache->lock);
 
-               if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
+               if (btrfs_test_opt(root, SPACE_CACHE) &&
                    cache->disk_cache_state < BTRFS_DC_CLEAR)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
@@ -4320,6 +4486,34 @@ int btrfs_pin_extent(struct btrfs_root *root,
        return 0;
 }
 
+/*
+ * this function must be called within a transaction
+ */
+int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   u64 bytenr, u64 num_bytes)
+{
+       struct btrfs_block_group_cache *cache;
+
+       cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+       BUG_ON(!cache);
+
+       /*
+        * pull in the free space cache (if any) so that our pin
+        * removes the free space from the cache.  We have load_only set
+        * to one because the slow code to read in the free extents does check
+        * the pinned extents.
+        */
+       cache_block_group(cache, trans, root, 1);
+
+       pin_down_extent(root, cache, bytenr, num_bytes, 0);
+
+       /* remove us from the free space cache (if we're there at all) */
+       btrfs_remove_free_space(cache, bytenr, num_bytes);
+       btrfs_put_block_group(cache);
+       return 0;
+}
+
 /**
  * btrfs_update_reserved_bytes - update the block_group and space info counters
  * @cache:     The cache we are manipulating
@@ -4749,27 +4943,24 @@ out:
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref)
+                          u64 parent, int last_ref, int for_cow)
 {
-       struct btrfs_block_rsv *block_rsv;
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
 
        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
-                                               parent, root->root_key.objectid,
-                                               btrfs_header_level(buf),
-                                               BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       buf->start, buf->len,
+                                       parent, root->root_key.objectid,
+                                       btrfs_header_level(buf),
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        }
 
        if (!last_ref)
                return;
 
-       block_rsv = get_block_rsv(trans, root);
        cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-       if (block_rsv->space_info != cache->space_info)
-               goto out;
 
        if (btrfs_header_generation(buf) == trans->transid) {
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
@@ -4797,12 +4988,12 @@ out:
        btrfs_put_block_group(cache);
 }
 
-int btrfs_free_extent(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent,
-                     u64 root_objectid, u64 owner, u64 offset)
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+                     u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
 
        /*
         * tree log blocks never actually go into the extent allocation
@@ -4814,14 +5005,17 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                btrfs_pin_extent(root, bytenr, num_bytes, 1);
                ret = 0;
        } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_DROP_DELAYED_REF, NULL);
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
-                                       parent, root_objectid, owner,
-                                       offset, BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                               num_bytes,
+                                               parent, root_objectid, owner,
+                                               offset, BTRFS_DROP_DELAYED_REF,
+                                               NULL, for_cow);
                BUG_ON(ret);
        }
        return ret;
@@ -4935,6 +5129,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
        bool use_cluster = true;
+       bool have_caching_bg = false;
        u64 ideal_cache_percent = 0;
        u64 ideal_cache_offset = 0;
 
@@ -5017,6 +5212,7 @@ ideal_cache:
                }
        }
 search:
+       have_caching_bg = false;
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups[index],
                            list) {
@@ -5046,13 +5242,15 @@ search:
                }
 
 have_block_group:
-               if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached)) {
                        u64 free_percent;
 
+                       found_uncached_bg = true;
                        ret = cache_block_group(block_group, trans,
                                                orig_root, 1);
                        if (block_group->cached == BTRFS_CACHE_FINISHED)
-                               goto have_block_group;
+                               goto alloc;
 
                        free_percent = btrfs_block_group_used(&block_group->item);
                        free_percent *= 100;
@@ -5074,7 +5272,6 @@ have_block_group:
                                                        orig_root, 0);
                                BUG_ON(ret);
                        }
-                       found_uncached_bg = true;
 
                        /*
                         * If loop is set for cached only, try the next block
@@ -5084,17 +5281,14 @@ have_block_group:
                                goto loop;
                }
 
-               cached = block_group_cache_done(block_group);
-               if (unlikely(!cached))
-                       found_uncached_bg = true;
-
+alloc:
                if (unlikely(block_group->ro))
                        goto loop;
 
                spin_lock(&block_group->free_space_ctl->tree_lock);
                if (cached &&
                    block_group->free_space_ctl->free_space <
-                   num_bytes + empty_size) {
+                   num_bytes + empty_cluster + empty_size) {
                        spin_unlock(&block_group->free_space_ctl->tree_lock);
                        goto loop;
                }
@@ -5115,12 +5309,10 @@ have_block_group:
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (last_ptr->block_group &&
-                           (last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))) {
-                               offset = 0;
+                       if (!last_ptr->block_group ||
+                           last_ptr->block_group->ro ||
+                           !block_group_bits(last_ptr->block_group, data))
                                goto refill_cluster;
-                       }
 
                        offset = btrfs_alloc_from_cluster(block_group, last_ptr,
                                                 num_bytes, search_start);
@@ -5171,7 +5363,7 @@ refill_cluster:
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
-                                              offset, num_bytes,
+                                              search_start, num_bytes,
                                               empty_cluster + empty_size);
                        if (ret == 0) {
                                /*
@@ -5225,6 +5417,8 @@ refill_cluster:
                        failed_alloc = true;
                        goto have_block_group;
                } else if (!offset) {
+                       if (!cached)
+                               have_caching_bg = true;
                        goto loop;
                }
 checks:
@@ -5275,6 +5469,9 @@ loop:
        }
        up_read(&space_info->groups_sem);
 
+       if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
+               goto search;
+
        if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
                goto search;
 
@@ -5460,7 +5657,8 @@ again:
        return ret;
 }
 
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+                                       u64 start, u64 len, int pin)
 {
        struct btrfs_block_group_cache *cache;
        int ret = 0;
@@ -5475,8 +5673,12 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
        if (btrfs_test_opt(root, DISCARD))
                ret = btrfs_discard_extent(root, start, len, NULL);
 
-       btrfs_add_free_space(cache, start, len);
-       btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+       if (pin)
+               pin_down_extent(root, cache, start, len, 1);
+       else {
+               btrfs_add_free_space(cache, start, len);
+               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+       }
        btrfs_put_block_group(cache);
 
        trace_btrfs_reserved_extent_free(root, start, len);
@@ -5484,6 +5686,18 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
        return ret;
 }
 
+int btrfs_free_reserved_extent(struct btrfs_root *root,
+                                       u64 start, u64 len)
+{
+       return __btrfs_free_reserved_extent(root, start, len, 0);
+}
+
+int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+                                      u64 start, u64 len)
+{
+       return __btrfs_free_reserved_extent(root, start, len, 1);
+}
+
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
@@ -5622,9 +5836,10 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 
        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
-       ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
-                                        0, root_objectid, owner, offset,
-                                        BTRFS_ADD_DELAYED_EXTENT, NULL);
+       ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+                                        ins->offset, 0,
+                                        root_objectid, owner, offset,
+                                        BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
        return ret;
 }
 
@@ -5737,8 +5952,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        block_rsv = get_block_rsv(trans, root);
 
        if (block_rsv->size == 0) {
-               ret = reserve_metadata_bytes(trans, root, block_rsv,
-                                            blocksize, 0);
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
                /*
                 * If we couldn't reserve metadata bytes try and use some from
                 * the global reserve.
@@ -5758,13 +5972,15 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        if (!ret)
                return block_rsv;
        if (ret) {
-               WARN_ON(1);
-               ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
-                                            0);
+               static DEFINE_RATELIMIT_STATE(_rs,
+                               DEFAULT_RATELIMIT_INTERVAL,
+                               /*DEFAULT_RATELIMIT_BURST*/ 2);
+               if (__ratelimit(&_rs)) {
+                       printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
+                       WARN_ON(1);
+               }
+               ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
                if (!ret) {
-                       spin_lock(&block_rsv->lock);
-                       block_rsv->size += blocksize;
-                       spin_unlock(&block_rsv->lock);
                        return block_rsv;
                } else if (ret && block_rsv != global_rsv) {
                        ret = block_rsv_use_bytes(global_rsv, blocksize);
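
The DEFINE_RATELIMIT_STATE()/__ratelimit() pair above replaces the unconditional WARN_ON with a message throttled to a small burst per interval. A rough user-space analogue of that throttle, using a fixed time window where the kernel version counts jiffies:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct ratelimit {
            time_t interval;      /* window length in seconds */
            int burst;            /* messages allowed per window */
            time_t window_start;
            int printed;
    };

    static bool ratelimit_ok(struct ratelimit *rs)
    {
            time_t now = time(NULL);

            if (now - rs->window_start >= rs->interval) {
                    rs->window_start = now;  /* new window, reset budget */
                    rs->printed = 0;
            }
            if (rs->printed >= rs->burst)
                    return false;            /* suppressed */
            rs->printed++;
            return true;
    }

    int main(void)
    {
            struct ratelimit rs = { .interval = 5, .burst = 2 };

            for (int i = 0; i < 10; i++)
                    if (ratelimit_ok(&rs))
                            printf("btrfs: block rsv returned %d\n", -28);
            return 0;
    }
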
@@ -5793,7 +6009,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size)
+                                       u64 hint, u64 empty_size, int for_cow)
 {
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
@@ -5837,10 +6053,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
 
-               ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op);
+                                       extent_op, for_cow);
                BUG_ON(ret);
        }
        return buf;
@@ -5857,6 +6074,7 @@ struct walk_control {
        int keep_locks;
        int reada_slot;
        int reada_count;
+       int for_reloc;
 };
 
 #define DROP_REFERENCE 1
@@ -5995,9 +6213,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
        /* wc->stage == UPDATE_BACKREF */
        if (!(wc->flags[level] & flag)) {
                BUG_ON(!path->locks[level]);
-               ret = btrfs_inc_ref(trans, root, eb, 1);
+               ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
                BUG_ON(ret);
-               ret = btrfs_dec_ref(trans, root, eb, 0);
+               ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret);
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                                                  eb->len, flag, 0);
@@ -6141,7 +6359,7 @@ skip:
                }
 
                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-                                       root->root_key.objectid, level - 1, 0);
+                               root->root_key.objectid, level - 1, 0, 0);
                BUG_ON(ret);
        }
        btrfs_tree_unlock(next);
@@ -6215,9 +6433,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
        if (wc->refs[level] == 1) {
                if (level == 0) {
                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
-                               ret = btrfs_dec_ref(trans, root, eb, 1);
+                               ret = btrfs_dec_ref(trans, root, eb, 1,
+                                                   wc->for_reloc);
                        else
-                               ret = btrfs_dec_ref(trans, root, eb, 0);
+                               ret = btrfs_dec_ref(trans, root, eb, 0,
+                                                   wc->for_reloc);
                        BUG_ON(ret);
                }
                /* make block locked assertion in clean_tree_block happy */
@@ -6244,7 +6464,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                               btrfs_header_owner(path->nodes[level + 1]));
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
@@ -6328,7 +6548,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  * blocks are properly updated.
  */
 void btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref)
+                        struct btrfs_block_rsv *block_rsv, int update_ref,
+                        int for_reloc)
 {
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans;
@@ -6416,6 +6637,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = update_ref;
        wc->keep_locks = 0;
+       wc->for_reloc = for_reloc;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -6500,6 +6722,7 @@ out:
  * drop subtree rooted at tree block 'node'.
  *
  * NOTE: this function will unlock and release tree block 'node'
+ * only used by relocation code
  */
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
@@ -6544,6 +6767,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = 0;
        wc->keep_locks = 1;
+       wc->for_reloc = 1;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -7054,14 +7278,12 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                return -ENOMEM;
        path->reada = 1;
 
-       cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
-       if (cache_gen != 0 &&
-           btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
+       cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
+       if (btrfs_test_opt(root, SPACE_CACHE) &&
+           btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
                need_clear = 1;
        if (btrfs_test_opt(root, CLEAR_CACHE))
                need_clear = 1;
-       if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
-               printk(KERN_INFO "btrfs: disk space caching is enabled\n");
 
        while (1) {
                ret = find_first_block_group(root, path, &key);
@@ -7300,7 +7522,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                goto out;
        }
 
-       inode = lookup_free_space_inode(root, block_group, path);
+       inode = lookup_free_space_inode(tree_root, block_group, path);
        if (!IS_ERR(inode)) {
                ret = btrfs_orphan_add(trans, inode);
                BUG_ON(ret);
@@ -7316,7 +7538,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                        spin_unlock(&block_group->lock);
                }
                /* One for our lookup ref */
-               iput(inode);
+               btrfs_add_delayed_iput(inode);
        }
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -7387,7 +7609,7 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
        int mixed = 0;
        int ret;
 
-       disk_super = &fs_info->super_copy;
+       disk_super = fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                return 1;