/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
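/*
 * BTRFS_ROOT_TRANS_TAG marks roots in fs_info->fs_roots_radix that have
 * been modified in the current transaction, so commit_fs_roots can find
 * them with radix_tree_gang_lookup_tag
 */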
#define BTRFS_ROOT_TRANS_TAG 0
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}
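/*
 * drop our reference on the old commit root and make the current root
 * node the new commit root for this btrfs_root
 */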
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}
/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&root->fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans) {
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&root->fs_info->trans_lock);
	if (root->fs_info->running_transaction) {
		/* someone else started a transaction after we unlocked */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = root->fs_info->running_transaction;
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&root->fs_info->trans_lock);
		return 0;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;
	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			     root->fs_info->btree_inode->i_mapping);
	root->fs_info->generation++;
	cur_trans->transid = root->fs_info->generation;
	root->fs_info->running_transaction = cur_trans;
	spin_unlock(&root->fs_info->trans_lock);

	return 0;
}
/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
			   (unsigned long)root->root_key.objectid,
			   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}
/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}
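/*
 * transaction start types: TRANS_JOIN and TRANS_JOIN_NOLOCK hop into the
 * running transaction without waiting on a blocked commit, TRANS_START
 * may wait first, and TRANS_USERSPACE is used for ioctl-driven
 * transactions (see may_wait_transaction below)
 */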
enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
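/*
 * start_transaction is the common helper behind the exported wrappers
 * below.  num_items is the caller's estimate of how many metadata items
 * it will modify; it is used to reserve space from trans_block_rsv
 * before joining, so any flushing happens outside the transaction.
 */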
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->use_count = 1;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}
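/*
 * Typical usage of the wrappers below (a minimal sketch):
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify up to one metadata item ...
 *	btrfs_end_transaction(trans, root);
 */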
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}
void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}
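/*
 * returns nonzero when it is time to wrap up the current transaction,
 * based on how full the global block reserve is
 */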
static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 0,
				    5, 0);
	return ret ? 1 : 0;
}
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates)
		btrfs_run_delayed_refs(trans, root, updates);

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}
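/*
 * common transaction teardown: with throttle set the caller is willing
 * to do extra work (running delayed refs, committing, delayed iputs),
 * and lock selects whether a blocked transaction may be pushed into
 * commit from here
 */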
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	trans->block_rsv = NULL;
	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
				   GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}
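/*
 * write and wait on all the dirty btree blocks that belong to this
 * transaction; without a transaction handle we fall back to flushing
 * the whole btree inode
 */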
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}
/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}
/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}
/*
 * dead roots are old snapshots that need to be deleted.  This adds a
 * given root onto the list of dead roots that need to be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}
/*
 * update all the reference counted (fs tree) roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}
/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}
/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				dentry->d_name.name, dentry->d_name.len,
				parent_inode, &key,
				BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);
	dput(parent);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));

	btrfs_reloc_post_snapshot(trans, pending);
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}
/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}
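/*
 * copy the current chunk root and tree root pointers (and the cache
 * generation, when space caching is enabled) into the in-memory super
 * block for the coming commit
 */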
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}
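/*
 * the two helpers below report, under trans_lock, whether the running
 * transaction is mid-commit or currently blocking new joins
 */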
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;

	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;

	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}
/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}
/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}
/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	trans->block_rsv = NULL;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		ret = btrfs_run_delayed_items(trans, root);
		BUG_ON(ret);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	BUG_ON(ret);

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* commit_fs_roots and commit_cowonly_roots are responsible for
	 * getting the various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}
/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, NULL, 0);
		else
			btrfs_drop_snapshot(root, NULL, 1);
	}
	return 0;
}