Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux...
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 23e936c3de76aaed14cb364c8dae89049ae3ea63..930ae8949737313a9cabfaa3de787c844bdecdf6 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -467,13 +467,59 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_root *root,
                             int load_cache_only)
 {
+       DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;
 
-       smp_mb();
-       if (cache->cached != BTRFS_CACHE_NO)
+       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
+       BUG_ON(!caching_ctl);
+
+       INIT_LIST_HEAD(&caching_ctl->list);
+       mutex_init(&caching_ctl->mutex);
+       init_waitqueue_head(&caching_ctl->wait);
+       caching_ctl->block_group = cache;
+       caching_ctl->progress = cache->key.objectid;
+       atomic_set(&caching_ctl->count, 1);
+       caching_ctl->work.func = caching_thread;
+
+       spin_lock(&cache->lock);
+       /*
+        * This should be rare, but it can happen when one thread starts to
+        * load the space cache info and another thread then starts a
+        * transaction commit that tries to do an allocation while the first
+        * thread is still loading the space cache info.  The previous loop
+        * should have kept us from choosing this block group, but if we have
+        * moved to the state where we will wait on caching block groups we
+        * need to first check whether a fast load is in progress here, so we
+        * can wait for it to finish; otherwise we could end up allocating
+        * from a block group whose cache gets evicted for one reason or
+        * another.
+        */
+       while (cache->cached == BTRFS_CACHE_FAST) {
+               struct btrfs_caching_control *ctl;
+
+               ctl = cache->caching_ctl;
+               atomic_inc(&ctl->count);
+               prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&cache->lock);
+
+               schedule();
+
+               finish_wait(&ctl->wait, &wait);
+               put_caching_control(ctl);
+               spin_lock(&cache->lock);
+       }
+
+       if (cache->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&cache->lock);
+               kfree(caching_ctl);
                return 0;
+       }
+       WARN_ON(cache->caching_ctl);
+       cache->caching_ctl = caching_ctl;
+       cache->cached = BTRFS_CACHE_FAST;
+       spin_unlock(&cache->lock);
 
        /*
         * We can't do the read from on-disk cache during a commit since we need
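
As an aside on the new wait loop above: it follows the usual kernel discipline of never sleeping with a spinlock held. The lock is dropped before schedule(), re-taken afterwards, and the condition is re-checked in a loop. Below is a rough userspace analog of that drop-lock/sleep/recheck shape, using a pthread condition variable in place of the kernel waitqueue; all names are illustrative and none of this is btrfs code.

/* Illustrative userspace analog of the BTRFS_CACHE_FAST wait loop:
 * drop the lock, sleep until the state changes, re-take the lock,
 * and re-check the condition.  Not kernel code. */
#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_FAST, CACHE_STARTED, CACHE_FINISHED };

struct cache {
	pthread_mutex_t lock;      /* stands in for cache->lock */
	pthread_cond_t  wait;      /* stands in for caching_ctl->wait */
	enum cache_state cached;
};

static void wait_for_fast_load(struct cache *c)
{
	pthread_mutex_lock(&c->lock);
	/* Sleep while another thread runs the fast space-cache load. */
	while (c->cached == CACHE_FAST)
		pthread_cond_wait(&c->wait, &c->lock); /* drops and re-takes lock */
	pthread_mutex_unlock(&c->lock);
}

static void finish_fast_load(struct cache *c, int ok)
{
	pthread_mutex_lock(&c->lock);
	c->cached = ok ? CACHE_FINISHED : CACHE_NO;
	pthread_mutex_unlock(&c->lock);
	pthread_cond_broadcast(&c->wait);              /* like wake_up(&ctl->wait) */
}

The pthread version hides the "prepare to wait, then drop the lock" dance inside pthread_cond_wait(); the kernel open-codes it with prepare_to_wait()/finish_wait() so that a wake-up arriving between the unlock and the schedule() is not lost.
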
@@ -484,56 +530,51 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
-               spin_lock(&cache->lock);
-               if (cache->cached != BTRFS_CACHE_NO) {
-                       spin_unlock(&cache->lock);
-                       return 0;
-               }
-               cache->cached = BTRFS_CACHE_STARTED;
-               spin_unlock(&cache->lock);
-
                ret = load_free_space_cache(fs_info, cache);
 
                spin_lock(&cache->lock);
                if (ret == 1) {
+                       cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
-                       cache->cached = BTRFS_CACHE_NO;
+                       if (load_cache_only) {
+                               cache->caching_ctl = NULL;
+                               cache->cached = BTRFS_CACHE_NO;
+                       } else {
+                               cache->cached = BTRFS_CACHE_STARTED;
+                       }
                }
                spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
                if (ret == 1) {
+                       put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
+       } else {
+               /*
+                * We are not going to do the fast caching, so set cached to
+                * the appropriate value and wake up any waiters.
+                */
+               spin_lock(&cache->lock);
+               if (load_cache_only) {
+                       cache->caching_ctl = NULL;
+                       cache->cached = BTRFS_CACHE_NO;
+               } else {
+                       cache->cached = BTRFS_CACHE_STARTED;
+               }
+               spin_unlock(&cache->lock);
+               wake_up(&caching_ctl->wait);
        }
 
-       if (load_cache_only)
-               return 0;
-
-       caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
-       BUG_ON(!caching_ctl);
-
-       INIT_LIST_HEAD(&caching_ctl->list);
-       mutex_init(&caching_ctl->mutex);
-       init_waitqueue_head(&caching_ctl->wait);
-       caching_ctl->block_group = cache;
-       caching_ctl->progress = cache->key.objectid;
-       /* one for caching kthread, one for caching block group list */
-       atomic_set(&caching_ctl->count, 2);
-       caching_ctl->work.func = caching_thread;
-
-       spin_lock(&cache->lock);
-       if (cache->cached != BTRFS_CACHE_NO) {
-               spin_unlock(&cache->lock);
-               kfree(caching_ctl);
+       if (load_cache_only) {
+               put_caching_control(caching_ctl);
                return 0;
        }
-       cache->caching_ctl = caching_ctl;
-       cache->cached = BTRFS_CACHE_STARTED;
-       spin_unlock(&cache->lock);
 
        down_write(&fs_info->extent_commit_sem);
+       atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);
 
@@ -1788,18 +1829,18 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 {
        int ret;
        u64 discarded_bytes = 0;
-       struct btrfs_multi_bio *multi = NULL;
+       struct btrfs_bio *bbio = NULL;
 
 
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-                             bytenr, &num_bytes, &multi, 0);
+                             bytenr, &num_bytes, &bbio, 0);
        if (!ret) {
-               struct btrfs_bio_stripe *stripe = multi->stripes;
+               struct btrfs_bio_stripe *stripe = bbio->stripes;
                int i;
 
 
-               for (i = 0; i < multi->num_stripes; i++, stripe++) {
+               for (i = 0; i < bbio->num_stripes; i++, stripe++) {
                        if (!stripe->dev->can_discard)
                                continue;
 
@@ -1818,7 +1859,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                         */
                        ret = 0;
                }
-               kfree(multi);
+               kfree(bbio);
        }
 
        if (actual_bytes)
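
Beyond the btrfs_multi_bio -> btrfs_bio rename, the discard path keeps its best-effort shape: the logical range is mapped to stripes, each stripe is discarded on its own device, devices that cannot discard are skipped, and an "operation not supported" result is deliberately not treated as fatal. A compact illustration of that shape in plain C, with hypothetical types standing in for the btrfs mapping structures.

/* Best-effort per-device discard loop (illustrative types and helpers). */
#include <errno.h>
#include <stdint.h>

struct stripe_sketch {
	int      can_discard;   /* device advertises discard support */
	uint64_t physical;      /* start of this stripe on the device */
	uint64_t length;
};

/* Stand-in for issuing the actual discard; returns 0 or -errno. */
static int issue_discard(const struct stripe_sketch *s)
{
	(void)s;
	return 0;
}

static int discard_stripes(struct stripe_sketch *stripes, int num_stripes,
			   uint64_t *discarded_bytes)
{
	int i, ret = 0;

	*discarded_bytes = 0;
	for (i = 0; i < num_stripes; i++) {
		if (!stripes[i].can_discard)
			continue;

		ret = issue_discard(&stripes[i]);
		if (!ret)
			*discarded_bytes += stripes[i].length;
		else if (ret == -EOPNOTSUPP)
			ret = 0;   /* unsupported is not an error, keep going */
	}
	return ret;
}
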
@@ -3375,7 +3416,8 @@ static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
                smp_mb();
                nr_pages = min_t(unsigned long, nr_pages,
                       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
-               writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
+               writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
+                                               WB_REASON_FS_FREE_SPACE);
 
                spin_lock(&space_info->lock);
                if (reserved > space_info->bytes_may_use)
@@ -3796,16 +3838,16 @@ void btrfs_free_block_rsv(struct btrfs_root *root,
        kfree(rsv);
 }
 
-int btrfs_block_rsv_add(struct btrfs_root *root,
-                       struct btrfs_block_rsv *block_rsv,
-                       u64 num_bytes)
+static inline int __block_rsv_add(struct btrfs_root *root,
+                                 struct btrfs_block_rsv *block_rsv,
+                                 u64 num_bytes, int flush)
 {
        int ret;
 
        if (num_bytes == 0)
                return 0;
 
-       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
        if (!ret) {
                block_rsv_add_bytes(block_rsv, num_bytes, 1);
                return 0;
@@ -3814,22 +3856,18 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
        return ret;
 }
 
+int btrfs_block_rsv_add(struct btrfs_root *root,
+                       struct btrfs_block_rsv *block_rsv,
+                       u64 num_bytes)
+{
+       return __block_rsv_add(root, block_rsv, num_bytes, 1);
+}
+
 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
                                struct btrfs_block_rsv *block_rsv,
                                u64 num_bytes)
 {
-       int ret;
-
-       if (num_bytes == 0)
-               return 0;
-
-       ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
-       if (!ret) {
-               block_rsv_add_bytes(block_rsv, num_bytes, 1);
-               return 0;
-       }
-
-       return ret;
+       return __block_rsv_add(root, block_rsv, num_bytes, 0);
 }
 
 int btrfs_block_rsv_check(struct btrfs_root *root,
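
The block-rsv change is a plain de-duplication: btrfs_block_rsv_add() and btrfs_block_rsv_add_noflush() differed only in the flush argument passed to reserve_metadata_bytes(), so the shared body moves into a static inline helper and the two exported functions become one-line wrappers. The same pattern in miniature, with illustrative names rather than the btrfs API.

/* Two thin wrappers over one helper that takes the only varying flag. */
#include <stdint.h>

/* Stand-in for the underlying reservation call. */
static int reserve_bytes(uint64_t num_bytes, int flush)
{
	(void)num_bytes;
	(void)flush;
	return 0;
}

static inline int __rsv_add(uint64_t num_bytes, int flush)
{
	if (num_bytes == 0)
		return 0;
	return reserve_bytes(num_bytes, flush);
}

int rsv_add(uint64_t num_bytes)
{
	return __rsv_add(num_bytes, 1);   /* allow flushing */
}

int rsv_add_noflush(uint64_t num_bytes)
{
	return __rsv_add(num_bytes, 0);   /* no flushing */
}

Keeping the helper static inline means the wrappers cost nothing at run time while the reservation logic lives in exactly one place.
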
@@ -4063,23 +4101,30 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
  */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
+       unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
 
        BUG_ON(!BTRFS_I(inode)->outstanding_extents);
        BTRFS_I(inode)->outstanding_extents--;
 
+       if (BTRFS_I(inode)->outstanding_extents == 0 &&
+           BTRFS_I(inode)->delalloc_meta_reserved) {
+               drop_inode_space = 1;
+               BTRFS_I(inode)->delalloc_meta_reserved = 0;
+       }
+
        /*
         * If we have at least as many outstanding extents as we have
         * reserved, then we need to leave the reserved extents count alone.
         */
        if (BTRFS_I(inode)->outstanding_extents >=
            BTRFS_I(inode)->reserved_extents)
-               return 0;
+               return drop_inode_space;
 
        dropped_extents = BTRFS_I(inode)->reserved_extents -
                BTRFS_I(inode)->outstanding_extents;
        BTRFS_I(inode)->reserved_extents -= dropped_extents;
-       return dropped_extents;
+       return dropped_extents + drop_inode_space;
 }
 
 /**
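
The accounting change makes drop_outstanding_extent() return one extra unit when the last outstanding extent goes away while the inode-update reservation (delalloc_meta_reserved) is still held, so that reservation is released together with any over-reserved extents. A tiny worked version of the arithmetic, with an illustrative struct rather than BTRFS_I(inode).

/* Worked version of the drop accounting (illustrative, not BTRFS_I()). */
struct inode_rsv_sketch {
	unsigned outstanding_extents;
	unsigned reserved_extents;
	int      delalloc_meta_reserved;
};

static unsigned drop_one_extent(struct inode_rsv_sketch *i)
{
	unsigned drop_inode_space = 0;
	unsigned dropped = 0;

	i->outstanding_extents--;

	/* Last outstanding extent: the inode-update reservation can go too. */
	if (i->outstanding_extents == 0 && i->delalloc_meta_reserved) {
		drop_inode_space = 1;
		i->delalloc_meta_reserved = 0;
	}

	/* Only shrink reserved_extents when it exceeds what is outstanding. */
	if (i->outstanding_extents >= i->reserved_extents)
		return drop_inode_space;

	dropped = i->reserved_extents - i->outstanding_extents;
	i->reserved_extents -= dropped;
	return dropped + drop_inode_space;
}
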
@@ -4165,9 +4210,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
                BTRFS_I(inode)->reserved_extents += nr_extents;
+       }
 
-               to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+       /*
+        * Add an item to reserve for updating the inode when we complete the
+        * delalloc io.
+        */
+       if (!BTRFS_I(inode)->delalloc_meta_reserved) {
+               nr_extents++;
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
        }
+
+       to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
        spin_unlock(&BTRFS_I(inode)->lock);
 
@@ -5165,13 +5219,15 @@ search:
                }
 
 have_block_group:
-               if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached)) {
                        u64 free_percent;
 
+                       found_uncached_bg = true;
                        ret = cache_block_group(block_group, trans,
                                                orig_root, 1);
                        if (block_group->cached == BTRFS_CACHE_FINISHED)
-                               goto have_block_group;
+                               goto alloc;
 
                        free_percent = btrfs_block_group_used(&block_group->item);
                        free_percent *= 100;
@@ -5193,7 +5249,6 @@ have_block_group:
                                                        orig_root, 0);
                                BUG_ON(ret);
                        }
-                       found_uncached_bg = true;
 
                        /*
                         * If loop is set for cached only, try the next block
@@ -5203,10 +5258,7 @@ have_block_group:
                                goto loop;
                }
 
-               cached = block_group_cache_done(block_group);
-               if (unlikely(!cached))
-                       found_uncached_bg = true;
-
+alloc:
                if (unlikely(block_group->ro))
                        goto loop;
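
Finally, in find_free_extent() the have_block_group step now asks block_group_cache_done() up front, records found_uncached_bg before any early exit (so a later retry pass still knows uncached groups exist), and jumps straight to the new alloc: label when the fast cache load completes instead of re-entering have_block_group. Greatly simplified, and with illustrative types only, the control flow looks roughly like this.

/* Schematic of the reworked have_block_group flow (not the real allocator). */
enum bg_cache_state { BG_CACHE_NO, BG_CACHE_STARTED, BG_CACHE_FINISHED };

struct group_sketch {
	enum bg_cache_state cached;
	int ro;
};

static void start_caching(struct group_sketch *g)
{
	if (g->cached == BG_CACHE_NO)
		g->cached = BG_CACHE_STARTED;   /* kicked off asynchronously */
}

static int try_alloc(struct group_sketch *g)
{
	(void)g;
	return -1;                              /* no space found in this sketch */
}

static int scan_groups(struct group_sketch *groups, int n, int cached_only,
		       int *found_uncached_bg)
{
	int i;

	for (i = 0; i < n; i++) {
		struct group_sketch *g = &groups[i];

		if (g->cached != BG_CACHE_FINISHED) {
			*found_uncached_bg = 1;          /* noted before any skip */
			start_caching(g);
			if (g->cached != BG_CACHE_FINISHED && cached_only)
				continue;                /* try the next group */
		}
		/* alloc: */
		if (g->ro)
			continue;
		if (try_alloc(g) == 0)
			return i;                        /* allocated from group i */
	}
	return -1;
}
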