btrfs_commit_transaction(trans, root);
ret = btrfs_write_and_wait_transaction(NULL, root);
BUG_ON(ret);
+
write_ctree_super(NULL, root);
mutex_unlock(&fs_info->fs_mutex);
extent_io_tree_empty_lru(&fs_info->extent_ins);
extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
- flush_workqueue(end_io_workqueue);
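+	/*
+	 * flush the async submit queue first; work there can still queue
+	 * bios whose completions are handled by the end_io queue
+	 */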
flush_workqueue(async_submit_workqueue);
+ flush_workqueue(end_io_workqueue);
truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
- flush_workqueue(end_io_workqueue);
- destroy_workqueue(end_io_workqueue);
-
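+	/* tear down in the same order: submit side before end_io */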
flush_workqueue(async_submit_workqueue);
destroy_workqueue(async_submit_workqueue);
+ flush_workqueue(end_io_workqueue);
+ destroy_workqueue(end_io_workqueue);
+
iput(fs_info->btree_inode);
#if 0
while(!list_empty(&fs_info->hashers)) {
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
- balance_dirty_pages_ratelimited_nr(
+ struct extent_io_tree *tree;
+ u64 num_dirty;
+ u64 start = 0;
+ unsigned long thresh = 16 * 1024 * 1024;
+ tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+
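+	/* pdflush is already doing writeback, don't throttle it here */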
+ if (current_is_pdflush())
+ return;
+
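+	/*
+	 * only throttle once more than thresh (16MB) of btree metadata
+	 * is dirty; thresh also caps how far count_range_bits scans
+	 */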
+ num_dirty = count_range_bits(tree, &start, (u64)-1,
+ thresh, EXTENT_DIRTY);
+ if (num_dirty > thresh) {
+ balance_dirty_pages_ratelimited_nr(
root->fs_info->btree_inode->i_mapping, 1);
+ }
}
void btrfs_set_buffer_defrag(struct extent_buffer *buf)
int ret;
mutex_lock(&root->fs_info->fs_mutex);
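+	/* skip the periodic commit once the fs has started unmounting */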
+ if (root->fs_info->closing)
+ goto out;
+
mutex_lock(&root->fs_info->trans_mutex);
cur = root->fs_info->running_transaction;
if (!cur) {
void btrfs_transaction_queue_work(struct btrfs_root *root, int delay)
{
- queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
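+	/* don't rearm the delayed work once the fs has started closing */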
+ if (!root->fs_info->closing)
+ queue_delayed_work(trans_wq, &root->fs_info->trans_work, delay);
}
void btrfs_transaction_flush_work(struct btrfs_root *root)
{
- cancel_rearming_delayed_workqueue(trans_wq, &root->fs_info->trans_work);
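+	/*
+	 * the delayed work no longer rearms itself once closing is set,
+	 * so cancel it and flush anything still queued
+	 */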
+ cancel_delayed_work(&root->fs_info->trans_work);
flush_workqueue(trans_wq);
}