Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 11 Nov 2009 21:38:59 +0000 (13:38 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 11 Nov 2009 21:38:59 +0000 (13:38 -0800)
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: fix panic when trying to destroy a newly allocated
  Btrfs: allow more metadata chunk preallocation
  Btrfs: fallback on uncompressed io if compressed io fails
  Btrfs: find ideal block group for caching
  Btrfs: avoid null deref in unpin_extent_cache()
  Btrfs: skip btrfs_release_path in btrfs_update_root and btrfs_del_root
  Btrfs: fix some metadata enospc issues
  Btrfs: fix how we set max_size for free space clusters
  Btrfs: cleanup transaction starting and fix journal_info usage
  Btrfs: fix data allocation hint start

fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/root-tree.c
fs/btrfs/transaction.c

fs/btrfs/extent-tree.c
index e238a0cdac677c06b1774018bf549b03399da4c8..94627c4cc193343aacc82b3aca1a6e9eaaefd8dd 100644
@@ -2977,10 +2977,10 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 
        free_space = btrfs_super_total_bytes(disk_super);
        /*
-        * we allow the metadata to grow to a max of either 5gb or 5% of the
+        * we allow the metadata to grow to a max of either 10gb or 5% of the
         * space in the volume.
         */
-       min_metadata = min((u64)5 * 1024 * 1024 * 1024,
+       min_metadata = min((u64)10 * 1024 * 1024 * 1024,
                             div64_u64(free_space * 5, 100));
        if (info->total_bytes >= min_metadata) {
                spin_unlock(&info->lock);
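
The threshold above is simply min(10GB, 5% of the volume). A quick stand-alone check of that arithmetic (plain C, example volume sizes invented for illustration; the kernel uses div64_u64() for the division):

#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* Example volume sizes: 100 GiB and 1 TiB. */
	uint64_t volumes[] = { 100ull << 30, 1ull << 40 };

	for (unsigned i = 0; i < 2; i++) {
		/* Same cap as the hunk: 10 GiB or 5% of the space, whichever
		 * is smaller. */
		uint64_t cap = min_u64(10ull * 1024 * 1024 * 1024,
				       volumes[i] * 5 / 100);

		printf("volume %llu GiB -> metadata threshold %llu GiB\n",
		       (unsigned long long)(volumes[i] >> 30),
		       (unsigned long long)(cap >> 30));
	}
	return 0;
}
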
@@ -4102,7 +4102,7 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 enum btrfs_loop_type {
-       LOOP_CACHED_ONLY = 0,
+       LOOP_FIND_IDEAL = 0,
        LOOP_CACHING_NOWAIT = 1,
        LOOP_CACHING_WAIT = 2,
        LOOP_ALLOC_CHUNK = 3,
@@ -4131,12 +4131,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_cache *block_group = NULL;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
+       int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
        int last_ptr_loop = 0;
        int loop = 0;
        bool found_uncached_bg = false;
        bool failed_cluster_refill = false;
        bool failed_alloc = false;
+       u64 ideal_cache_percent = 0;
+       u64 ideal_cache_offset = 0;
 
        WARN_ON(num_bytes < root->sectorsize);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4172,14 +4175,19 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                empty_cluster = 0;
 
        if (search_start == hint_byte) {
+ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
+                *
+                * However if we are re-searching with an ideal block group
+                * picked out then we don't care that the block group is cached.
                 */
                if (block_group && block_group_bits(block_group, data) &&
-                   block_group_cache_done(block_group)) {
+                   (block_group->cached != BTRFS_CACHE_NO ||
+                    search_start == ideal_cache_offset)) {
                        down_read(&space_info->groups_sem);
                        if (list_empty(&block_group->list) ||
                            block_group->ro) {
@@ -4191,13 +4199,13 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                 */
                                btrfs_put_block_group(block_group);
                                up_read(&space_info->groups_sem);
-                       } else
+                       } else {
                                goto have_block_group;
+                       }
                } else if (block_group) {
                        btrfs_put_block_group(block_group);
                }
        }
-
 search:
        down_read(&space_info->groups_sem);
        list_for_each_entry(block_group, &space_info->block_groups, list) {
@@ -4209,28 +4217,45 @@ search:
 
 have_block_group:
                if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
+                       u64 free_percent;
+
+                       free_percent = btrfs_block_group_used(&block_group->item);
+                       free_percent *= 100;
+                       free_percent = div64_u64(free_percent,
+                                                block_group->key.offset);
+                       free_percent = 100 - free_percent;
+                       if (free_percent > ideal_cache_percent &&
+                           likely(!block_group->ro)) {
+                               ideal_cache_offset = block_group->key.objectid;
+                               ideal_cache_percent = free_percent;
+                       }
+
                        /*
-                        * we want to start caching kthreads, but not too many
-                        * right off the bat so we don't overwhelm the system,
-                        * so only start them if there are less than 2 and we're
-                        * in the initial allocation phase.
+                        * We only want to start kthread caching if we are at
+                        * the point where we will wait for caching to make
+                        * progress, or if our ideal search is over and we've
+                        * found somebody to start caching.
                         */
                        if (loop > LOOP_CACHING_NOWAIT ||
-                           atomic_read(&space_info->caching_threads) < 2) {
+                           (loop > LOOP_FIND_IDEAL &&
+                            atomic_read(&space_info->caching_threads) < 2)) {
                                ret = cache_block_group(block_group);
                                BUG_ON(ret);
                        }
-               }
-
-               cached = block_group_cache_done(block_group);
-               if (unlikely(!cached)) {
                        found_uncached_bg = true;
 
-                       /* if we only want cached bgs, loop */
-                       if (loop == LOOP_CACHED_ONLY)
+                       /*
+                        * If we are still in the ideal-search pass, just try
+                        * the next block group.
+                        */
+                       if (loop == LOOP_FIND_IDEAL)
                                goto loop;
                }
 
+               cached = block_group_cache_done(block_group);
+               if (unlikely(!cached))
+                       found_uncached_bg = true;
+
                if (unlikely(block_group->ro))
                        goto loop;
 
@@ -4410,9 +4435,11 @@ loop:
        }
        up_read(&space_info->groups_sem);
 
-       /* LOOP_CACHED_ONLY, only search fully cached block groups
-        * LOOP_CACHING_NOWAIT, search partially cached block groups, but
-        *                      dont wait foR them to finish caching
+       /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
+        *                      for them to make caching progress.  Also
+        *                      determine the best possible bg to cache
+        * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
+        *                      caching kthreads as we move along
         * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
         * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
         * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
@@ -4421,12 +4448,47 @@ loop:
        if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
            (found_uncached_bg || empty_size || empty_cluster ||
             allowed_chunk_alloc)) {
-               if (found_uncached_bg) {
+               if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
                        found_uncached_bg = false;
-                       if (loop < LOOP_CACHING_WAIT) {
-                               loop++;
+                       loop++;
+                       if (!ideal_cache_percent &&
+                           atomic_read(&space_info->caching_threads))
                                goto search;
-                       }
+
+                       /*
+                        * One of the following two things has happened so far:
+                        *
+                        * 1) We found an ideal block group for caching that
+                        * is mostly full and will cache quickly, so we might
+                        * as well wait for it.
+                        *
+                        * 2) We searched for cached only and we didn't find
+                        * anything, and we didn't start any caching kthreads
+                        * either, so chances are we will loop through and
+                        * start a couple caching kthreads, and then come back
+                        * around and just wait for them.  This will be slower
+                        * because we will have 2 caching kthreads reading at
+                        * the same time when we could have just started one
+                        * and waited for it to get far enough to give us an
+                        * allocation, so go ahead and go to the wait caching
+                        * loop.
+                        */
+                       loop = LOOP_CACHING_WAIT;
+                       search_start = ideal_cache_offset;
+                       ideal_cache_percent = 0;
+                       goto ideal_cache;
+               } else if (loop == LOOP_FIND_IDEAL) {
+                       /*
+                        * Didn't find an uncached bg, wait on anything we find
+                        * next.
+                        */
+                       loop = LOOP_CACHING_WAIT;
+                       goto search;
+               }
+
+               if (loop < LOOP_CACHING_WAIT) {
+                       loop++;
+                       goto search;
                }
 
                if (loop == LOOP_ALLOC_CHUNK) {
@@ -4438,7 +4500,8 @@ loop:
                        ret = do_chunk_alloc(trans, root, num_bytes +
                                             2 * 1024 * 1024, data, 1);
                        allowed_chunk_alloc = 0;
-               } else {
+                       done_chunk_alloc = 1;
+               } else if (!done_chunk_alloc) {
                        space_info->force_alloc = 1;
                }
 
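
The new LOOP_FIND_IDEAL pass above ranks still-uncached block groups by how much free space they have and remembers the best candidate (ideal_cache_offset / ideal_cache_percent) before any caching kthread is started. A rough user-space sketch of that bookkeeping, with invented types standing in for the btrfs structures:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a block group: start offset, total size, bytes allocated. */
struct group {
	uint64_t objectid;	/* start of the group */
	uint64_t size;		/* block_group->key.offset in the patch */
	uint64_t used;		/* btrfs_block_group_used(&bg->item) */
	int ro;
};

/* Same arithmetic as the hunk: how much of the group is free, in percent. */
static uint64_t free_percent(const struct group *g)
{
	return 100 - (g->used * 100) / g->size;	/* div64_u64() in the kernel */
}

int main(void)
{
	struct group groups[] = {
		{ .objectid = 0,          .size = 1 << 30, .used = 900u << 20 },
		{ .objectid = 1ull << 30, .size = 1 << 30, .used = 100u << 20 },
	};
	uint64_t ideal_cache_offset = 0, ideal_cache_percent = 0;

	for (unsigned i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		uint64_t pct = free_percent(&groups[i]);

		/* Remember the emptiest read-write group, as find_free_extent
		 * now does for its second, "ideal" search pass. */
		if (pct > ideal_cache_percent && !groups[i].ro) {
			ideal_cache_offset = groups[i].objectid;
			ideal_cache_percent = pct;
		}
	}
	printf("ideal group at %llu is %llu%% free\n",
	       (unsigned long long)ideal_cache_offset,
	       (unsigned long long)ideal_cache_percent);
	return 0;
}
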
fs/btrfs/extent_map.c
index 2c726b7b9faa17af725b8a5b8319148758b7dd91..ccbdcb54ec5d988f39690b33f9964c35da134e30 100644
@@ -208,7 +208,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);
 
-       WARN_ON(em->start != start || !em);
+       WARN_ON(!em || em->start != start);
 
        if (!em)
                goto out;
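
The one-line fix above is purely about evaluation order: with ||, C evaluates the left operand first, so the NULL test has to come before any dereference. A minimal stand-alone illustration (plain C, not btrfs code):

#include <assert.h>
#include <stddef.h>

struct extent_map { unsigned long long start; };

/* Mirrors the corrected WARN_ON() condition: true when the lookup failed
 * or returned a mapping that does not start where we expected. */
static int mapping_is_bad(const struct extent_map *em, unsigned long long start)
{
	/* Safe: !em short-circuits, so em->start is never read through NULL.
	 * The pre-patch order (em->start != start || !em) dereferenced em
	 * before the NULL check could ever run. */
	return !em || em->start != start;
}

int main(void)
{
	struct extent_map em = { .start = 4096 };

	assert(!mapping_is_bad(&em, 4096));
	assert(mapping_is_bad(&em, 8192));
	assert(mapping_is_bad(NULL, 4096));
	return 0;
}
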
fs/btrfs/free-space-cache.c
index 5c2caad76212d2b5bfcc4329e255978525d6f5ed..cb2849f03251d081c6a6eca73f9372f0798517ac 100644
@@ -1296,7 +1296,7 @@ again:
                        window_start = entry->offset;
                        window_free = entry->bytes;
                        last = entry;
-                       max_extent = 0;
+                       max_extent = entry->bytes;
                } else {
                        last = next;
                        window_free += next->bytes;
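
For context not shown in the hunk, this loop builds a candidate cluster window out of consecutive free-space entries and tracks the largest single extent seen; seeding that maximum with the first entry's size instead of 0 whenever a new window starts is the whole fix. A simplified stand-alone sketch of the pattern, with invented types:

#include <stdint.h>
#include <stdio.h>

struct entry { uint64_t offset, bytes; };

/* Scan entries in offset order, restarting the window at every gap, and
 * report the free bytes and largest extent of the last window seen. */
static void scan(const struct entry *e, unsigned n,
		 uint64_t *window_free, uint64_t *max_extent)
{
	for (unsigned i = 0; i < n; i++) {
		int contiguous = i > 0 &&
			e[i].offset == e[i - 1].offset + e[i - 1].bytes;

		if (!contiguous) {
			/* New window: start max_extent at entry->bytes (the
			 * change made above), so max_size is never reported
			 * smaller than the first extent in the cluster. */
			*window_free = e[i].bytes;
			*max_extent = e[i].bytes;
		} else {
			*window_free += e[i].bytes;
			if (e[i].bytes > *max_extent)
				*max_extent = e[i].bytes;
		}
	}
}

int main(void)
{
	struct entry e[] = { { 0, 8192 }, { 8192, 4096 }, { 65536, 4096 } };
	uint64_t wfree = 0, wmax = 0;

	scan(e, 3, &wfree, &wmax);
	printf("window_free=%llu max_extent=%llu\n",
	       (unsigned long long)wfree, (unsigned long long)wmax);
	return 0;
}
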
fs/btrfs/inode.c
index dae12dc7e1595e5e4a78e6f9b03fe566b7dcc24e..b3ad168a0bfc97906dd1fa556b61297250faae58 100644
@@ -538,7 +538,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_io_tree *io_tree;
-       int ret;
+       int ret = 0;
 
        if (list_empty(&async_cow->extents))
                return 0;
@@ -552,6 +552,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 
                io_tree = &BTRFS_I(inode)->io_tree;
 
+retry:
                /* did the compression code fall back to uncompressed IO? */
                if (!async_extent->pages) {
                        int page_started = 0;
@@ -562,11 +563,11 @@ static noinline int submit_compressed_extents(struct inode *inode,
                                    async_extent->ram_size - 1, GFP_NOFS);
 
                        /* allocate blocks */
-                       cow_file_range(inode, async_cow->locked_page,
-                                      async_extent->start,
-                                      async_extent->start +
-                                      async_extent->ram_size - 1,
-                                      &page_started, &nr_written, 0);
+                       ret = cow_file_range(inode, async_cow->locked_page,
+                                            async_extent->start,
+                                            async_extent->start +
+                                            async_extent->ram_size - 1,
+                                            &page_started, &nr_written, 0);
 
                        /*
                         * if page_started, cow_file_range inserted an
@@ -574,7 +575,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
                         * and IO for us.  Otherwise, we need to submit
                         * all those pages down to the drive.
                         */
-                       if (!page_started)
+                       if (!page_started && !ret)
                                extent_write_locked_range(io_tree,
                                                  inode, async_extent->start,
                                                  async_extent->start +
@@ -602,7 +603,21 @@ static noinline int submit_compressed_extents(struct inode *inode,
                                           async_extent->compressed_size,
                                           0, alloc_hint,
                                           (u64)-1, &ins, 1);
-               BUG_ON(ret);
+               if (ret) {
+                       int i;
+                       for (i = 0; i < async_extent->nr_pages; i++) {
+                               WARN_ON(async_extent->pages[i]->mapping);
+                               page_cache_release(async_extent->pages[i]);
+                       }
+                       kfree(async_extent->pages);
+                       async_extent->nr_pages = 0;
+                       async_extent->pages = NULL;
+                       unlock_extent(io_tree, async_extent->start,
+                                     async_extent->start +
+                                     async_extent->ram_size - 1, GFP_NOFS);
+                       goto retry;
+               }
+
                em = alloc_extent_map(GFP_NOFS);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
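
The new error path above implements a fallback: if the disk reservation for the compressed extent fails, the compressed pages are released and the loop retries from the top, where the !async_extent->pages test now routes the range through the ordinary uncompressed cow_file_range() writeback. A compact sketch of that control flow, with hypothetical helpers in place of the real btrfs calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the real btrfs helpers. */
static int reserve_compressed_extent(void) { return -1; /* pretend ENOSPC */ }
static int write_compressed(void)          { return 0; }
static int write_uncompressed(void)        { return 0; }

static int submit_one_extent(void)
{
	void *pages = malloc(64);	/* pretend compressed page array */

retry:
	if (!pages)
		/* No compressed pages (or we dropped them): plain writeback. */
		return write_uncompressed();

	if (reserve_compressed_extent() < 0) {
		/* Reservation failed: throw away the compressed copy and go
		 * around again, falling into the uncompressed path above. */
		free(pages);
		pages = NULL;
		goto retry;
	}
	return write_compressed();
}

int main(void)
{
	printf("submit_one_extent() -> %d\n", submit_one_extent());
	return 0;
}
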
@@ -743,8 +758,22 @@ static noinline int cow_file_range(struct inode *inode,
        em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
                                   start, num_bytes);
        if (em) {
-               alloc_hint = em->block_start;
-               free_extent_map(em);
+               /*
+                * if block start isn't an actual block number then find the
+                * first block in this inode and use that as a hint.  If that
+                * block is also bogus then just don't worry about it.
+                */
+               if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
+                       free_extent_map(em);
+                       em = search_extent_mapping(em_tree, 0, 0);
+                       if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
+                               alloc_hint = em->block_start;
+                       if (em)
+                               free_extent_map(em);
+               } else {
+                       alloc_hint = em->block_start;
+                       free_extent_map(em);
+               }
        }
        read_unlock(&BTRFS_I(inode)->extent_tree.lock);
        btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
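
The hunk above changes how the allocation hint is chosen: a cached mapping whose block_start is a sentinel (EXTENT_MAP_LAST_BYTE or beyond) rather than a real disk block is no longer used verbatim; instead the inode's first mapping is tried, and if that is bogus too the hint is simply left alone. A small sketch of that decision, with made-up constants and lookup helpers:

#include <stdint.h>
#include <stdio.h>

#define MAP_LAST_BYTE ((uint64_t)-4)	/* sentinel: not a real block number */

struct map { uint64_t block_start; };

/* Pretend lookups: the mapping covering 'start', and the inode's first one. */
static struct map *lookup_mapping(uint64_t start)
{
	(void)start;
	static struct map m = { MAP_LAST_BYTE };
	return &m;
}

static struct map *first_mapping(void)
{
	static struct map m = { 1 << 20 };
	return &m;
}

static uint64_t pick_alloc_hint(uint64_t start)
{
	uint64_t hint = 0;
	struct map *em = lookup_mapping(start);

	if (!em)
		return hint;

	if (em->block_start >= MAP_LAST_BYTE) {
		/* Bogus block number: fall back to the inode's first extent,
		 * and if that is bogus as well just keep hint == 0. */
		em = first_mapping();
		if (em && em->block_start < MAP_LAST_BYTE)
			hint = em->block_start;
	} else {
		hint = em->block_start;
	}
	return hint;
}

int main(void)
{
	printf("alloc hint = %llu\n", (unsigned long long)pick_alloc_hint(0));
	return 0;
}
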
@@ -2474,7 +2503,19 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 
        root = BTRFS_I(dir)->root;
 
+       /*
+        * 5 items for unlink inode
+        * 1 for orphan
+        */
+       ret = btrfs_reserve_metadata_space(root, 6);
+       if (ret)
+               return ret;
+
        trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans)) {
+               btrfs_unreserve_metadata_space(root, 6);
+               return PTR_ERR(trans);
+       }
 
        btrfs_set_trans_block_group(trans, dir);
 
@@ -2489,6 +2530,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
        nr = trans->blocks_used;
 
        btrfs_end_transaction_throttle(trans, root);
+       btrfs_unreserve_metadata_space(root, 6);
        btrfs_btree_balance_dirty(root, nr);
        return ret;
 }
@@ -2569,7 +2611,16 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
            inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                return -ENOTEMPTY;
 
+       ret = btrfs_reserve_metadata_space(root, 5);
+       if (ret)
+               return ret;
+
        trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans)) {
+               btrfs_unreserve_metadata_space(root, 5);
+               return PTR_ERR(trans);
+       }
+
        btrfs_set_trans_block_group(trans, dir);
 
        if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -2592,6 +2643,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 out:
        nr = trans->blocks_used;
        ret = btrfs_end_transaction_throttle(trans, root);
+       btrfs_unreserve_metadata_space(root, 5);
        btrfs_btree_balance_dirty(root, nr);
 
        if (ret && !err)
@@ -5128,6 +5180,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->logged_trans = 0;
        ei->outstanding_extents = 0;
        ei->reserved_extents = 0;
+       ei->root = NULL;
        spin_lock_init(&ei->accounting_lock);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
@@ -5143,6 +5196,14 @@ void btrfs_destroy_inode(struct inode *inode)
        WARN_ON(!list_empty(&inode->i_dentry));
        WARN_ON(inode->i_data.nrpages);
 
+       /*
+        * This can happen when we create an inode, but somebody else also
+        * created the same inode and we need to destroy the one we already
+        * created.
+        */
+       if (!root)
+               goto free;
+
        /*
         * Make sure we're properly removed from the ordered operation
         * lists.
@@ -5178,6 +5239,7 @@ void btrfs_destroy_inode(struct inode *inode)
        }
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
+free:
        kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
 }
 
@@ -5283,11 +5345,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                return -ENOTEMPTY;
 
        /*
-        * 2 items for dir items
-        * 1 item for orphan entry
-        * 1 item for ref
+        * We want to reserve the absolute worst case amount of items.  So if
+        * both inodes are subvols and we need to unlink them then that would
+        * require 4 item modifications, but if they are both normal inodes it
+        * would require 5 item modifications, so we'll assume they're normal
+        * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
+        * should cover the worst case number of items we'll modify.
         */
-       ret = btrfs_reserve_metadata_space(root, 4);
+       ret = btrfs_reserve_metadata_space(root, 11);
        if (ret)
                return ret;
 
@@ -5403,7 +5468,7 @@ out_fail:
        if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
 
-       btrfs_unreserve_metadata_space(root, 4);
+       btrfs_unreserve_metadata_space(root, 11);
        return ret;
 }
 
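
btrfs_unlink(), btrfs_rmdir() and btrfs_rename() above all gain the same shape of bookkeeping: reserve metadata space for the worst-case number of items first, back the reservation out if the transaction cannot be started, and drop it again once the transaction ends. A skeleton of that pattern with stub helpers (not the real btrfs API):

#include <errno.h>
#include <stdio.h>

struct trans { int started; };

/* Stubs standing in for btrfs_reserve_metadata_space() and friends. */
static int reserve_metadata(int items)       { (void)items; return 0; }
static void unreserve_metadata(int items)    { (void)items; }
static struct trans *start_transaction(void) { static struct trans t = { 1 }; return &t; }
static void end_transaction(struct trans *t) { (void)t; }
static int do_unlink_items(struct trans *t)  { (void)t; return 0; }

static int unlink_like(void)
{
	struct trans *trans;
	int ret;

	/* 5 items for the unlink itself plus 1 for the orphan entry,
	 * matching the comment added to btrfs_unlink(). */
	ret = reserve_metadata(6);
	if (ret)
		return ret;

	trans = start_transaction();
	if (!trans) {			/* IS_ERR(trans) in the patch */
		unreserve_metadata(6);
		return -ENOMEM;
	}

	ret = do_unlink_items(trans);

	end_transaction(trans);
	unreserve_metadata(6);		/* reservation only spans this call */
	return ret;
}

int main(void)
{
	printf("unlink_like() -> %d\n", unlink_like());
	return 0;
}
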
fs/btrfs/root-tree.c
index 9351428f30e2129343e7c9b882b1fbb26e56c874..67fa2d29d663dcc2f665b3217bba98302784a60e 100644
@@ -159,7 +159,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
        write_extent_buffer(l, item, ptr, sizeof(*item));
        btrfs_mark_buffer_dirty(path->nodes[0]);
 out:
-       btrfs_release_path(root, path);
        btrfs_free_path(path);
        return ret;
 }
@@ -332,7 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        BUG_ON(refs != 0);
        ret = btrfs_del_item(trans, root, path);
 out:
-       btrfs_release_path(root, path);
        btrfs_free_path(path);
        return ret;
 }
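
The two deletions above rely on btrfs_free_path() already releasing the path before freeing it, which is the point of the "skip btrfs_release_path" patch in the shortlog. Schematically (toy types, not the real helpers):

#include <stdlib.h>

struct buffer { int refs; };
struct path   { struct buffer *nodes[8]; };

static void put_buffer(struct buffer *b) { if (b) b->refs--; }

static void release_path(struct path *p)
{
	for (int i = 0; i < 8; i++) {
		put_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

static void free_path(struct path *p)
{
	release_path(p);	/* freeing releases, so callers need not */
	free(p);
}

int main(void)
{
	struct path *p = calloc(1, sizeof(*p));

	free_path(p);		/* no separate release_path() call required */
	return 0;
}
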
fs/btrfs/transaction.c
index bca82a4ca8e6d8f78bad1bd21b511a433aaed48e..c207e8c32c9bfc5212e111edb440adf4b59f2059 100644
@@ -163,8 +163,14 @@ static void wait_current_trans(struct btrfs_root *root)
        }
 }
 
+enum btrfs_trans_type {
+       TRANS_START,
+       TRANS_JOIN,
+       TRANS_USERSPACE,
+};
+
 static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
-                                            int num_blocks, int wait)
+                                            int num_blocks, int type)
 {
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
@@ -172,7 +178,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 
        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->log_root_recovering &&
-           ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
+           ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
+            type == TRANS_USERSPACE))
                wait_current_trans(root);
        ret = join_transaction(root);
        BUG_ON(ret);
@@ -186,7 +193,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
        h->alloc_exclude_start = 0;
        h->delayed_ref_updates = 0;
 
-       if (!current->journal_info)
+       if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
 
        root->fs_info->running_transaction->use_count++;
@@ -198,18 +205,18 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
 {
-       return start_transaction(root, num_blocks, 1);
+       return start_transaction(root, num_blocks, TRANS_START);
 }
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
 {
-       return start_transaction(root, num_blocks, 0);
+       return start_transaction(root, num_blocks, TRANS_JOIN);
 }
 
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                         int num_blocks)
 {
-       return start_transaction(r, num_blocks, 2);
+       return start_transaction(r, num_blocks, TRANS_USERSPACE);
 }
 
 /* wait for a transaction commit to be fully complete */
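
The transaction.c change above replaces the magic 0/1/2 "wait" argument with a named enum and stops pointing current->journal_info at handles created for userspace (ioctl-driven) transactions. A minimal stand-alone model of that dispatch, with a fake "current" task and the actual waiting elided:

#include <stdio.h>

enum trans_type { TRANS_START, TRANS_JOIN, TRANS_USERSPACE };

struct task   { void *journal_info; };
struct handle { enum trans_type type; };

static struct task current_task;	/* stand-in for the kernel's "current" */

static struct handle *start_transaction(enum trans_type type, int open_ioctl_trans)
{
	static struct handle h;

	h.type = type;

	/* TRANS_START waits for a pending commit unless an ioctl-opened
	 * transaction is in flight, TRANS_USERSPACE always waits, and
	 * TRANS_JOIN never does. */
	int should_wait = (type == TRANS_START && !open_ioctl_trans) ||
			  type == TRANS_USERSPACE;
	(void)should_wait;	/* wait_current_trans() would run when set */

	/* As in the patch, userspace transactions never take ownership of
	 * journal_info. */
	if (!current_task.journal_info && type != TRANS_USERSPACE)
		current_task.journal_info = &h;

	return &h;
}

int main(void)
{
	start_transaction(TRANS_USERSPACE, 0);
	printf("journal_info after TRANS_USERSPACE: %s\n",
	       current_task.journal_info ? "set" : "unset");	/* unset */

	start_transaction(TRANS_START, 0);
	printf("journal_info after TRANS_START: %s\n",
	       current_task.journal_info ? "set" : "unset");	/* set */
	return 0;
}
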