return 0;
}
+/*
+ * Verify that the fsid recorded in a tree block's header belongs to
+ * this filesystem or to one of its seed filesystems.
+ *
+ * Returns 0 if the fsid matches this fs or any fs_devices on the
+ * ->seed chain, 1 otherwise (caller treats nonzero as a bad block).
+ */
+static int check_tree_block_fsid(struct btrfs_root *root,
+ struct extent_buffer *eb)
+{
+ struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+ u8 fsid[BTRFS_UUID_SIZE];
+ int ret = 1;
+
+ read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
+ BTRFS_FSID_SIZE);
+ while (fs_devices) {
+ if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
+ ret = 0;
+ break;
+ }
+ /* not ours: walk the chain of seed device sets */
+ fs_devices = fs_devices->seed;
+ }
+ return ret;
+}
+
int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
ret = -EIO;
goto err;
}
- if (memcmp_extent_buffer(eb, root->fs_info->fsid,
- (unsigned long)btrfs_header_fsid(eb),
- BTRFS_FSID_SIZE)) {
+ if (check_tree_block_fsid(root, eb)) {
printk("bad fsid on block %Lu\n", eb->start);
ret = -EIO;
goto err;
u64 objectid)
{
root->node = NULL;
- root->inode = NULL;
root->commit_root = NULL;
root->ref_tree = NULL;
root->sectorsize = sectorsize;
root->defrag_running = 0;
root->defrag_level = 0;
root->root_key.objectid = objectid;
+ root->anon_super.s_root = NULL;
+ root->anon_super.s_dev = 0;
+ INIT_LIST_HEAD(&root->anon_super.s_list);
+ INIT_LIST_HEAD(&root->anon_super.s_instances);
+ init_rwsem(&root->anon_super.s_umount);
+
return 0;
}
root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
if (IS_ERR(root))
return root;
+
+ set_anon_super(&root->anon_super, NULL);
+
ret = radix_tree_insert(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid,
root);
kfree(root);
return ERR_PTR(ret);
}
- ret = btrfs_find_dead_roots(fs_info->tree_root,
- root->root_key.objectid, root);
- BUG_ON(ret);
-
+ if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+ ret = btrfs_find_dead_roots(fs_info->tree_root,
+ root->root_key.objectid, root);
+ BUG_ON(ret);
+ btrfs_orphan_cleanup(root);
+ }
return root;
}
kfree(root);
return ERR_PTR(ret);
}
-
+#if 0
ret = btrfs_sysfs_add_root(root);
if (ret) {
free_extent_buffer(root->node);
kfree(root);
return ERR_PTR(ret);
}
+#endif
root->in_sysfs = 1;
return root;
}
u64 offset;
/* the generic O_DIRECT read code does this */
- if (!page) {
+ if (1 || !page) {
__unplug_io_fn(bdi, page);
return;
}
u32 blocksize;
u32 stripesize;
u64 generation;
+ struct btrfs_key location;
struct buffer_head *bh;
struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
if (!btrfs_super_root(disk_super))
goto fail_sb_buffer;
- err = btrfs_parse_options(tree_root, options);
- if (err)
+ ret = btrfs_parse_options(tree_root, options);
+ if (ret) {
+ err = ret;
goto fail_sb_buffer;
+ }
/*
* we need to start all the end_io workers up front because the
btrfs_start_workers(&fs_info->endio_write_workers,
fs_info->thread_pool_size);
- err = -EINVAL;
- if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
- printk("Btrfs: wanted %llu devices, but found %llu\n",
- (unsigned long long)btrfs_super_num_devices(disk_super),
- (unsigned long long)fs_devices->open_devices);
- if (btrfs_test_opt(tree_root, DEGRADED))
- printk("continuing in degraded mode\n");
- else {
- goto fail_sb_buffer;
- }
- }
-
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_chunk_tree(chunk_root);
mutex_unlock(&fs_info->chunk_mutex);
- BUG_ON(ret);
+ if (ret) {
+ printk("btrfs: failed to read chunk tree on %s\n", sb->s_id);
+ goto fail_chunk_root;
+ }
btrfs_close_extra_devices(fs_devices);
btrfs_super_root(disk_super),
blocksize, generation);
if (!tree_root->node)
- goto fail_sb_buffer;
+ goto fail_chunk_root;
ret = find_and_setup_root(tree_root, fs_info,
btrfs_read_block_groups(extent_root);
- fs_info->generation = btrfs_super_generation(disk_super) + 1;
+ fs_info->generation = generation + 1;
+ fs_info->last_trans_committed = generation;
fs_info->data_alloc_profile = (u64)-1;
fs_info->metadata_alloc_profile = (u64)-1;
fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
if (!fs_info->transaction_kthread)
goto fail_cleaner;
+ if (sb->s_flags & MS_RDONLY)
+ goto read_fs_root;
+
if (btrfs_super_log_root(disk_super) != 0) {
u32 blocksize;
u64 bytenr = btrfs_super_log_root(disk_super);
ret = btrfs_recover_log_trees(log_tree_root);
BUG_ON(ret);
}
- fs_info->last_trans_committed = btrfs_super_generation(disk_super);
ret = btrfs_cleanup_reloc_trees(tree_root);
BUG_ON(ret);
+ location.objectid = BTRFS_FS_TREE_OBJECTID;
+ location.type = BTRFS_ROOT_ITEM_KEY;
+ location.offset = (u64)-1;
+
+read_fs_root:
+ fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
+ if (!fs_info->fs_root)
+ goto fail_cleaner;
return tree_root;
fail_cleaner:
free_extent_buffer(extent_root->node);
fail_tree_root:
free_extent_buffer(tree_root->node);
+fail_chunk_root:
+ free_extent_buffer(chunk_root->node);
fail_sys_array:
fail_sb_buffer:
btrfs_stop_workers(&fs_info->fixup_workers);
total_errors++;
continue;
}
- if (!dev->in_fs_metadata)
+ if (!dev->in_fs_metadata || !dev->writeable)
continue;
+ btrfs_set_stack_device_generation(dev_item, 0);
btrfs_set_stack_device_type(dev_item, dev->type);
btrfs_set_stack_device_id(dev_item, dev->devid);
btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
btrfs_set_stack_device_io_width(dev_item, dev->io_width);
btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
+ memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
flags = btrfs_super_flags(sb);
btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
dev = list_entry(cur, struct btrfs_device, dev_list);
if (!dev->bdev)
continue;
- if (!dev->in_fs_metadata)
+ if (!dev->in_fs_metadata || !dev->writeable)
continue;
BUG_ON(!dev->pending_io);
{
radix_tree_delete(&fs_info->fs_roots_radix,
(unsigned long)root->root_key.objectid);
+ if (root->anon_super.s_dev) {
+ down_write(&root->anon_super.s_umount);
+ kill_anon_super(&root->anon_super);
+ }
+#if 0
if (root->in_sysfs)
btrfs_sysfs_del_root(root);
- if (root->inode)
- iput(root->inode);
+#endif
if (root->node)
free_extent_buffer(root->node);
if (root->commit_root)
return 0;
}
-int close_ctree(struct btrfs_root *root)
+/*
+ * Walk every fs root cached in fs_roots_radix, resurrect the dead
+ * roots recorded for each and run orphan cleanup on it.  Always
+ * returns 0 (lookup failures terminate the walk; per-root failures
+ * are fatal via BUG_ON).
+ */
+int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
+ u64 root_objectid = 0;
+ struct btrfs_root *gang[8];
+ int i;
 int ret;
- struct btrfs_trans_handle *trans;
- struct btrfs_fs_info *fs_info = root->fs_info;
- fs_info->closing = 1;
- smp_mb();
+ while (1) {
+ ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)gang, root_objectid,
+ ARRAY_SIZE(gang));
+ if (!ret)
+ break;
+ /*
+ * Advance the search key past the last root found before
+ * processing the gang.  A separate variable holds the
+ * btrfs_find_dead_roots() result so the gang loop bound
+ * (ret) is not clobbered mid-walk, which would otherwise
+ * cut the loop short after the first entry.
+ */
+ root_objectid = gang[ret - 1]->root_key.objectid + 1;
+ for (i = 0; i < ret; i++) {
+ int err;
+
+ err = btrfs_find_dead_roots(fs_info->tree_root,
+ gang[i]->root_key.objectid,
+ gang[i]);
+ BUG_ON(err);
+ btrfs_orphan_cleanup(gang[i]);
+ }
+ }
+ return 0;
+}
- kthread_stop(root->fs_info->transaction_kthread);
- kthread_stop(root->fs_info->cleaner_kthread);
+/*
+ * Flush the filesystem state for a clean shutdown / ro transition:
+ * clean old snapshots under cleaner_mutex, commit the running
+ * transaction, commit a second time to drop the original snapshot,
+ * wait for the transaction to reach disk, then write the super block.
+ *
+ * Returns the result of write_ctree_super(); earlier failures are
+ * fatal via BUG_ON.
+ */
+int btrfs_commit_super(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+ int ret;
+ mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_clean_old_snapshots(root);
+ mutex_unlock(&root->fs_info->cleaner_mutex);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
- /* run commit again to drop the original snapshot */
+ BUG_ON(ret);
+ /* run commit again to drop the original snapshot */
trans = btrfs_start_transaction(root, 1);
btrfs_commit_transaction(trans, root);
ret = btrfs_write_and_wait_transaction(NULL, root);
BUG_ON(ret);
- write_ctree_super(NULL, root);
+ ret = write_ctree_super(NULL, root);
+ return ret;
+}
+
+int close_ctree(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+
+ fs_info->closing = 1;
+ smp_mb();
+
+ kthread_stop(root->fs_info->transaction_kthread);
+ kthread_stop(root->fs_info->cleaner_kthread);
+
+ if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+ ret = btrfs_commit_super(root);
+ if (ret) {
+ printk("btrfs: commit super returns %d\n", ret);
+ }
+ }
if (fs_info->delalloc_bytes) {
printk("btrfs: at unmount delalloc count %Lu\n",
free_extent_buffer(root->fs_info->dev_root->node);
btrfs_free_block_groups(root->fs_info);
- fs_info->closing = 2;
- del_fs_roots(fs_info);
- filemap_write_and_wait(fs_info->btree_inode->i_mapping);
+ del_fs_roots(fs_info);
- truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
+ iput(fs_info->btree_inode);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->submit_workers);
- iput(fs_info->btree_inode);
#if 0
while(!list_empty(&fs_info->hashers)) {
struct btrfs_hasher *hasher;