/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

struct map_lookup {
        u64 type;
        int io_align;
        int io_width;
        int stripe_len;
        int sector_size;
        int num_stripes;
        int sub_stripes;
        struct btrfs_bio_stripe stripes[];
};

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
        mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
        mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                kfree(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
        return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;

        bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
        fs_info = device->dev_root->fs_info;
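        /*
         * throttle against a target below the async submit limit so
         * waiters in the async submission path can be woken while we
         * are still draining this list
         */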
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        pending = device->pending_bios;
        tail = device->pending_bio_tail;
        WARN_ON(pending && !tail);
        device->pending_bios = NULL;
        device->pending_bio_tail = NULL;

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (pending) {
                again = 1;
                device->running_pending = 1;
        } else {
                again = 0;
                device->running_pending = 0;
        }
        spin_unlock(&device->io_lock);

        while (pending) {
                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
                atomic_dec(&fs_info->nr_async_bios);

                if (atomic_read(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                submit_bio(cur->bi_rw, cur);
                num_run++;

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && num_run > 16 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct bio *old_head;
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                continue;
                        }
                        spin_lock(&device->io_lock);

                        old_head = device->pending_bios;
                        device->pending_bios = pending;
                        if (device->pending_bio_tail)
                                tail->bi_next = old_head;
                        else
                                device->pending_bio_tail = tail;

                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
        }

        spin_lock(&device->io_lock);
        if (device->pending_bios)
                goto loop_lock;
        spin_unlock(&device->io_lock);
done:
        return 0;
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}
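
/*
 * track a scanned device in the in-memory list for its fsid,
 * allocating a new btrfs_fs_devices entry when this is the first
 * device seen for that filesystem; a newer generation number
 * updates which device is considered the latest.
 */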
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->barriers = 1;
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}
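
/*
 * duplicate the device list of @orig so the devices of a seed
 * filesystem can be tracked separately from the sprouted one;
 * everything cloned so far is freed on allocation failure.
 */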
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!fs_devices)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;

                device->name = kstrdup(orig_dev->name, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        goto error;
                }

                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
                device->barriers = 1;
                spin_lock_init(&device->io_lock);
                INIT_LIST_HEAD(&device->dev_list);
                INIT_LIST_HEAD(&device->dev_alloc_list);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}
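
/*
 * close and drop any scanned devices that did not show up in the
 * filesystem metadata, walking the seed device lists as well.
 */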
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *next;

        mutex_lock(&uuid_mutex);
again:
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;

                if (device->bdev) {
                        close_bdev_exclusive(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                kfree(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        mutex_unlock(&uuid_mutex);
        return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (device->bdev) {
                        close_bdev_exclusive(device->bdev, device->mode);
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                device->bdev = NULL;
                device->writeable = 0;
                device->in_fs_metadata = 0;
        }
        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        return ret;
}
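
/*
 * open every scanned device for this fsid, verify that its super
 * block still matches the expected devid/uuid, and remember the
 * bdev with the highest generation as latest_bdev.
 */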
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                bdev = open_bdev_exclusive(device->name, flags, holder);
                if (IS_ERR(bdev)) {
                        printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);

                bh = btrfs_read_dev_super(bdev);
                if (!bh)
                        goto error_close;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = le64_to_cpu(disk_super->dev_item.devid);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                continue;

error_brelse:
                brelse(bh);
error_close:
                close_bdev_exclusive(bdev, FMODE_READ);
error:
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EIO;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}

int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct buffer_head *bh;
        int ret;
        u64 devid;
        u64 transid;

        mutex_lock(&uuid_mutex);

        bdev = open_bdev_exclusive(path, flags, holder);
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
        bh = btrfs_read_dev_super(bdev);
        if (!bh) {
                ret = -EIO;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
        devid = le64_to_cpu(disk_super->dev_item.devid);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
        else {
                /* FIXME, make a real uuid parser */
                printk(KERN_INFO "device fsid %llx-%llx ",
                       *(unsigned long long *)disk_super->fsid,
                       *(unsigned long long *)(disk_super->fsid + 8));
        }
        printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

        brelse(bh);
error_close:
        close_bdev_exclusive(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
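/*
 * dev extents are keyed by (devid, physical offset); we walk them in
 * offset order looking for the first hole of at least @num_bytes,
 * never handing out the first 1MB of the device.
 */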
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_device *device,
                                         u64 num_bytes, u64 *start)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 hole_size = 0;
        u64 last_byte = 0;
        u64 search_start = 0;
        u64 search_end = device->total_bytes;
        int ret;
        int slot = 0;
        int start_found;
        struct extent_buffer *l;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;
        start_found = 0;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max((u64)1024 * 1024, search_start);

        if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
                search_start = max(root->fs_info->alloc_start, search_start);

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
no_more_items:
                        if (!start_found) {
                                if (search_start >= search_end) {
                                        ret = -ENOSPC;
                                        goto error;
                                }
                                *start = search_start;
                                start_found = 1;
                                goto check_pending;
                        }
                        *start = last_byte > search_start ?
                                last_byte : search_start;
                        if (search_end <= *start) {
                                ret = -ENOSPC;
                                goto error;
                        }
                        goto check_pending;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        goto no_more_items;

                if (key.offset >= search_start && key.offset > last_byte &&
                    start_found) {
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
                                goto check_pending;
                        }
                }
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                start_found = 1;
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
                path->slots[0]++;
                cond_resched();
        }
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        BUG_ON(*start < search_start);

        if (*start + num_bytes > search_end) {
                ret = -ENOSPC;
                goto error;
        }
        /* check for pending inserts here */
        ret = 0;

error:
        btrfs_free_path(path);
        return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_device *device,
                                 u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                BUG_ON(ret);
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                ret = 0;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        }
        BUG_ON(ret);

        if (device->bytes_used > 0)
                device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_free_path(path);
        return ret;
}

static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = (unsigned long)btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
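
/*
 * delete the dev item for @device from the chunk tree; runs in its
 * own transaction with the chunk mutex held.
 */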
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 1);
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}

int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        int ret = 0;

        mutex_lock(&uuid_mutex);
        mutex_lock(&root->fs_info->volume_mutex);

        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;
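
        /*
         * the raid profiles in use put a floor on the number of rw
         * devices that must remain: four for raid10, two for raid1
         */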
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
            root->fs_info->fs_devices->rw_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
            root->fs_info->fs_devices->rw_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        printk(KERN_ERR "btrfs: no missing devices found to "
                               "remove\n");
                        goto out;
                }
        } else {
                bdev = open_bdev_exclusive(device_path, FMODE_READ,
                                           root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                }

                set_blocksize(bdev, 4096);
                bh = btrfs_read_dev_super(bdev);
                if (!bh) {
                        ret = -EIO;
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = le64_to_cpu(disk_super->dev_item.devid);
                dev_uuid = disk_super->dev_item.uuid;
                device = btrfs_find_device(root, devid, dev_uuid,
                                           disk_super->fsid);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
        }

        if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
                printk(KERN_ERR "btrfs: unable to remove the only writeable "
                       "device\n");
                ret = -EINVAL;
                goto error_brelse;
        }

        if (device->writeable) {
                list_del_init(&device->dev_alloc_list);
                root->fs_info->fs_devices->rw_devices--;
        }

        ret = btrfs_shrink_device(device, 0);
        if (ret)
                goto error_brelse;

        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
                goto error_brelse;

        device->in_fs_metadata = 0;
        list_del_init(&device->dev_list);
        device->fs_devices->num_devices--;

        next_device = list_entry(root->fs_info->fs_devices->devices.next,
                                 struct btrfs_device, dev_list);
        if (device->bdev == root->fs_info->sb->s_bdev)
                root->fs_info->sb->s_bdev = next_device->bdev;
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;

        if (device->bdev) {
                close_bdev_exclusive(device->bdev, device->mode);
                device->bdev = NULL;
                device->fs_devices->open_devices--;
        }

        num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

        if (device->fs_devices->open_devices == 0) {
                struct btrfs_fs_devices *fs_devices;
                fs_devices = root->fs_info->fs_devices;
                while (fs_devices) {
                        if (fs_devices->seed == device->fs_devices)
                                break;
                        fs_devices = fs_devices->seed;
                }
                fs_devices->seed = device->fs_devices->seed;
                device->fs_devices->seed = NULL;
                __btrfs_close_devices(device->fs_devices);
                free_fs_devices(device->fs_devices);
        }

        /*
         * at this point, the device is zero sized.  We want to
         * remove it from the devices list and zero out the old super
         */
        if (device->writeable) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
                set_buffer_dirty(bh);
                sync_dirty_buffer(bh);
        }

        kfree(device->name);
        kfree(device);
        ret = 0;

error_brelse:
        brelse(bh);
error_close:
        if (bdev)
                close_bdev_exclusive(bdev, FMODE_READ);
out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_fs_devices *old_devices;
        struct btrfs_fs_devices *seed_devices;
        struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
        struct btrfs_device *device;
        u64 super_flags;

        BUG_ON(!mutex_is_locked(&uuid_mutex));
        if (!fs_devices->seeding)
                return -EINVAL;

        seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
        if (!seed_devices)
                return -ENOMEM;

        old_devices = clone_fs_devices(fs_devices);
        if (IS_ERR(old_devices)) {
                kfree(seed_devices);
                return PTR_ERR(old_devices);
        }

        list_add(&old_devices->list, &fs_uuids);

        memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
        seed_devices->opened = 1;
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
        list_splice_init(&fs_devices->devices, &seed_devices->devices);
        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
                device->fs_devices = seed_devices;
        }

        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
        fs_devices->seed = seed_devices;

        generate_random_uuid(fs_devices->fsid);
        memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
        super_flags = btrfs_super_flags(disk_super) &
                      ~BTRFS_SUPER_FLAG_SEEDING;
        btrfs_set_super_flags(disk_super, super_flags);

        return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_dev_item *dev_item;
        struct btrfs_device *device;
        struct btrfs_key key;
        u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];
        u64 devid;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        root = root->fs_info->chunk_root;
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = BTRFS_DEV_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
                if (ret < 0)
                        goto error;

                leaf = path->nodes[0];
next_slot:
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret > 0)
                                break;
                        if (ret < 0)
                                goto error;
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                        btrfs_release_path(root, path);
                        continue;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
                    key.type != BTRFS_DEV_ITEM_KEY)
                        break;

                dev_item = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_dev_item);
                devid = btrfs_device_id(leaf, dev_item);
                read_extent_buffer(leaf, dev_uuid,
                                   (unsigned long)btrfs_device_uuid(dev_item),
                                   BTRFS_UUID_SIZE);
                read_extent_buffer(leaf, fs_uuid,
                                   (unsigned long)btrfs_device_fsid(dev_item),
                                   BTRFS_UUID_SIZE);
                device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
                BUG_ON(!device);

                if (device->fs_devices->seeding) {
                        btrfs_set_device_generation(leaf, dev_item,
                                                    device->generation);
                        btrfs_mark_buffer_dirty(leaf);
                }

                path->slots[0]++;
                goto next_slot;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
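
/*
 * add the device at @device_path to the filesystem; when the fs is
 * currently a seed, sprout a fresh writable fs on top of it first.
 */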
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct block_device *bdev;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
        u64 total_bytes;
        int seeding_dev = 0;
        int ret = 0;

        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;

        bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
        if (!bdev)
                return -EIO;

        if (root->fs_info->fs_devices->seeding) {
                seeding_dev = 1;
                down_write(&sb->s_umount);
                mutex_lock(&uuid_mutex);
        }

        filemap_write_and_wait(bdev->bd_inode->i_mapping);
        mutex_lock(&root->fs_info->volume_mutex);

        devices = &root->fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
                if (device->bdev == bdev) {
                        ret = -EEXIST;
                        goto error;
                }
        }

        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device) {
                /* we can safely leave the fs_devices entry around */
                ret = -ENOMEM;
                goto error;
        }

        device->name = kstrdup(device_path, GFP_NOFS);
        if (!device->name) {
                kfree(device);
                ret = -ENOMEM;
                goto error;
        }

        ret = find_next_devid(root, &device->devid);
        if (ret) {
                kfree(device->name);
                kfree(device);
                goto error;
        }

        trans = btrfs_start_transaction(root, 1);
        lock_chunks(root);

        device->barriers = 1;
        device->writeable = 1;
        device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
        spin_lock_init(&device->io_lock);
        device->generation = trans->transid;
        device->io_width = root->sectorsize;
        device->io_align = root->sectorsize;
        device->sector_size = root->sectorsize;
        device->total_bytes = i_size_read(bdev->bd_inode);
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;

        set_blocksize(device->bdev, 4096);

        if (seeding_dev) {
                sb->s_flags &= ~MS_RDONLY;
                ret = btrfs_prepare_sprout(trans, root);
                BUG_ON(ret);
        }

        device->fs_devices = root->fs_info->fs_devices;
        list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
        root->fs_info->fs_devices->open_devices++;
        root->fs_info->fs_devices->rw_devices++;
        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);

        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);

        if (seeding_dev) {
                ret = init_first_rw_device(trans, root, device);
                BUG_ON(ret);
                ret = btrfs_finish_sprout(trans, root);
                BUG_ON(ret);
        } else {
                ret = btrfs_add_device(trans, root, device);
        }

        /*
         * we've got more storage, clear any full flags on the space
         * infos
         */
        btrfs_clear_space_info_full(root->fs_info);

        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);

        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);

                ret = btrfs_relocate_sys_chunks(root);
                BUG_ON(ret);
        }
out:
        mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
error:
        close_bdev_exclusive(bdev, 0);
        if (seeding_dev) {
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
        }
        goto out;
}

static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
                                        struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                               struct btrfs_device *device, u64 new_size)
{
        struct btrfs_super_block *super_copy =
                &device->dev_root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = new_size - device->total_bytes;

        if (!device->writeable)
                return -EACCES;
        if (new_size <= device->total_bytes)
                return -EINVAL;

        btrfs_set_super_total_bytes(super_copy, old_total + diff);
        device->fs_devices->total_rw_bytes += diff;

        device->total_bytes = new_size;
        btrfs_clear_space_info_full(device->dev_root->fs_info);

        return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
{
        int ret;
        lock_chunks(device->dev_root);
        ret = __btrfs_grow_device(trans, device, new_size);
        unlock_chunks(device->dev_root);
        return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
                            u64 chunk_offset)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        root = root->fs_info->chunk_root;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = chunk_objectid;
        key.offset = chunk_offset;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        BUG_ON(ret);

        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return 0;
}
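
/*
 * system chunks are duplicated in the super block's sys_chunk_array;
 * remove the matching (key, chunk item) pair and slide the rest of
 * the array down over it.
 */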
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
                        chunk_offset)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)(ptr + len);
                        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
                        len += btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                if (key.objectid == chunk_objectid &&
                    key.offset == chunk_offset) {
                        memmove(ptr, ptr + len, array_size - (cur + len));
                        array_size -= len;
                        btrfs_set_super_sys_array_size(super_copy, array_size);
                } else {
                        ptr += len;
                        cur += len;
                }
        }
        return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
                                u64 chunk_tree, u64 chunk_objectid,
                                u64 chunk_offset)
{
        struct extent_map_tree *em_tree;
        struct btrfs_root *extent_root;
        struct btrfs_trans_handle *trans;
        struct extent_map *em;
        struct map_lookup *map;
        int ret;
        int i;

        printk(KERN_INFO "btrfs relocating chunk %llu\n",
               (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;

        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        BUG_ON(ret);

        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);

        lock_chunks(root);

        /*
         * step two, delete the device extents and the
         * chunk tree entries
         */
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        spin_unlock(&em_tree->lock);

        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
        map = (struct map_lookup *)em->bdev;

        for (i = 0; i < map->num_stripes; i++) {
                ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
                                            map->stripes[i].physical);
                BUG_ON(ret);

                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        BUG_ON(ret);
                }
        }
        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
                               chunk_offset);
        BUG_ON(ret);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
                BUG_ON(ret);
        }

        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);

        spin_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
        spin_unlock(&em_tree->lock);

        kfree(map);
        em->bdev = NULL;

        /* once for the tree */
        free_extent_map(em);
        /* once for us */
        free_extent_map(em);

        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_chunk *chunk;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
                BUG_ON(ret == 0);

                ret = btrfs_previous_item(chunk_root, path, key.objectid,
                                          key.type);
                if (ret < 0)
                        goto error;
                if (ret > 0)
                        break;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                chunk = btrfs_item_ptr(leaf, path->slots[0],
                                       struct btrfs_chunk);
                chunk_type = btrfs_chunk_type(leaf, chunk);
                btrfs_release_path(chunk_root, path);

                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
                                                   found_key.objectid,
                                                   found_key.offset);
                        BUG_ON(ret);
                }

                if (found_key.offset == 0)
                        break;
                key.offset = found_key.offset - 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}
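
/* div_factor(num, f) returns f tenths of num */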
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

int btrfs_balance(struct btrfs_root *dev_root)
{
        int ret;
        struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;

        if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;

        mutex_lock(&dev_root->fs_info->volume_mutex);
        dev_root = dev_root->fs_info->dev_root;

        /* step one make some room on all the devices */
        list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
                if (!device->writeable ||
                    device->total_bytes - device->bytes_used > size_to_free)
                        continue;

                ret = btrfs_shrink_device(device, old_size - size_to_free);
                BUG_ON(ret);

                trans = btrfs_start_transaction(dev_root, 1);
                BUG_ON(!trans);

                ret = btrfs_grow_device(trans, device, old_size);
                BUG_ON(ret);

                btrfs_end_transaction(trans, dev_root);
        }

        /* step two, relocate all the chunks */
        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;

                /*
                 * this shouldn't happen, it means the last relocate
                 * failed
                 */
                if (ret == 0)
                        break;

                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
                if (ret)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;

                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
                key.offset = found_key.offset;
                /* chunk zero is special */
                if (key.offset == 0)
                        break;

                btrfs_release_path(chunk_root, path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                BUG_ON(ret);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        mutex_unlock(&dev_root->fs_info->volume_mutex);
        return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 length;
        u64 chunk_tree;
        u64 chunk_objectid;
        u64 chunk_offset;
        int ret;
        int slot;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = device->total_bytes - new_size;

        if (new_size >= device->total_bytes)
                return -EINVAL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
                goto done;
        }

        path->reada = 2;

        lock_chunks(root);

        device->total_bytes = new_size;
        if (device->writeable)
                device->fs_devices->total_rw_bytes -= diff;
        ret = btrfs_update_device(trans, device);
        if (ret) {
                unlock_chunks(root);
                btrfs_end_transaction(trans, root);
                goto done;
        }
        WARN_ON(diff > old_total);
        btrfs_set_super_total_bytes(super_copy, old_total - diff);
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);

        key.objectid = device->devid;
        key.offset = (u64)-1;
        key.type = BTRFS_DEV_EXTENT_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto done;

                ret = btrfs_previous_item(root, path, 0, key.type);
                if (ret < 0)
                        goto done;
                if (ret) {
                        ret = 0;
                        goto done;
                }

                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);

                if (key.objectid != device->devid)
                        goto done;

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);

                if (key.offset + length <= new_size)
                        break;

                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
                btrfs_release_path(root, path);

                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
                if (ret)
                        goto done;
        }

done:
        btrfs_free_path(path);
        return ret;
}

static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_key *key,
                                  struct btrfs_chunk *chunk, int item_size)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key disk_key;
        u32 array_size;
        u8 *ptr;

        array_size = btrfs_super_sys_array_size(super_copy);
        if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
                return -EFBIG;

        ptr = super_copy->sys_chunk_array + array_size;
        btrfs_cpu_key_to_disk(&disk_key, key);
        memcpy(ptr, &disk_key, sizeof(disk_key));
        ptr += sizeof(disk_key);
        memcpy(ptr, chunk, item_size);
        item_size += sizeof(disk_key);
        btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
        return 0;
}
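
/*
 * usable bytes a chunk contributes: mirrored profiles (raid1/dup)
 * store two copies so only one stripe counts; raid10 stripes across
 * num_stripes / sub_stripes mirror sets, e.g. 4 stripes with 2
 * sub_stripes and a 1G calc_size yields a 2G chunk.
 */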
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
                                        int num_stripes, int sub_stripes)
{
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
        else if (type & BTRFS_BLOCK_GROUP_RAID10)
                return calc_size * (num_stripes / sub_stripes);
        else
                return calc_size * num_stripes;
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root,
                               struct map_lookup **map_ret,
                               u64 *num_bytes, u64 *stripe_size,
                               u64 start, u64 type)
{
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct btrfs_device *device = NULL;
        struct btrfs_fs_devices *fs_devices = info->fs_devices;
        struct list_head *cur;
        struct map_lookup *map = NULL;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct list_head private_devs;
        int min_stripe_size = 1 * 1024 * 1024;
        u64 calc_size = 1024 * 1024 * 1024;
        u64 max_chunk_size = calc_size;
        u64 min_free;
        u64 avail;
        u64 max_avail = 0;
        u64 dev_offset;
        int num_stripes = 1;
        int min_stripes = 1;
        int sub_stripes = 0;
        int looped = 0;
        int ret;
        int index;
        int stripe_len = 64 * 1024;

        if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
            (type & BTRFS_BLOCK_GROUP_DUP)) {
                WARN_ON(1);
                type &= ~BTRFS_BLOCK_GROUP_DUP;
        }
        if (list_empty(&fs_devices->alloc_list))
                return -ENOSPC;

        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
                num_stripes = fs_devices->rw_devices;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_DUP)) {
                num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
                num_stripes = min_t(u64, 2, fs_devices->rw_devices);
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                num_stripes = fs_devices->rw_devices;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
                sub_stripes = 2;
                min_stripes = 4;
        }

        if (type & BTRFS_BLOCK_GROUP_DATA) {
                max_chunk_size = 10 * calc_size;
                min_stripe_size = 64 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
                max_chunk_size = 4 * calc_size;
                min_stripe_size = 32 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                calc_size = 8 * 1024 * 1024;
                max_chunk_size = calc_size * 2;
                min_stripe_size = 1 * 1024 * 1024;
        }

        /* we don't want a chunk larger than 10% of writeable space */
        max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
                             max_chunk_size);

again:
        if (!map || map->num_stripes != num_stripes) {
                kfree(map);
                map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
                if (!map)
                        return -ENOMEM;
                map->num_stripes = num_stripes;
        }

        if (calc_size * num_stripes > max_chunk_size) {
                calc_size = max_chunk_size;
                do_div(calc_size, num_stripes);
                do_div(calc_size, stripe_len);
                calc_size *= stripe_len;
        }
        /* we don't want tiny stripes */
        calc_size = max_t(u64, min_stripe_size, calc_size);

        do_div(calc_size, stripe_len);
        calc_size *= stripe_len;

        cur = fs_devices->alloc_list.next;
        index = 0;

        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_free = calc_size * 2;
        else
                min_free = calc_size;

        /*
         * we add 1MB because we never use the first 1MB of the device, unless
         * we've looped, then we are likely allocating the maximum amount of
         * space left already
         */
        if (!looped)
                min_free += 1024 * 1024;
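
        /*
         * walk the alloc_list collecting devices with at least min_free
         * available onto private_devs until we have one per stripe
         */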
        INIT_LIST_HEAD(&private_devs);
        while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_alloc_list);
                BUG_ON(!device->writeable);
                if (device->total_bytes > device->bytes_used)
                        avail = device->total_bytes - device->bytes_used;
                else
                        avail = 0;
                cur = cur->next;

                if (device->in_fs_metadata && avail >= min_free) {
                        ret = find_free_dev_extent(trans, device,
                                                   min_free, &dev_offset);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
                                map->stripes[index].dev = device;
                                map->stripes[index].physical = dev_offset;
                                index++;
                                if (type & BTRFS_BLOCK_GROUP_DUP) {
                                        map->stripes[index].dev = device;
                                        map->stripes[index].physical =
                                                dev_offset + calc_size;
                                        index++;
                                }
                        }
                } else if (device->in_fs_metadata && avail > max_avail)
                        max_avail = avail;
                if (cur == &fs_devices->alloc_list)
                        break;
        }
        list_splice(&private_devs, &fs_devices->alloc_list);
        if (index < num_stripes) {
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
                                num_stripes /= sub_stripes;
                                num_stripes *= sub_stripes;
                        }
                        looped = 1;
                        goto again;
                }
                if (!looped && max_avail > 0) {
                        looped = 1;
                        calc_size = max_avail;
                        goto again;
                }
                kfree(map);
                return -ENOSPC;
        }
        map->sector_size = extent_root->sectorsize;
        map->stripe_len = stripe_len;
        map->io_align = stripe_len;
        map->io_width = stripe_len;
        map->type = type;
        map->num_stripes = num_stripes;
        map->sub_stripes = sub_stripes;

        *map_ret = map;
        *stripe_size = calc_size;
        *num_bytes = chunk_bytes_by_type(type, calc_size,
                                         num_stripes, sub_stripes);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                kfree(map);
                return -ENOMEM;
        }
        em->bdev = (struct block_device *)map;
        em->start = start;
        em->len = *num_bytes;
        em->block_start = 0;
        em->block_len = em->len;

        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        spin_unlock(&em_tree->lock);
        BUG_ON(ret);
        free_extent_map(em);

        ret = btrfs_make_block_group(trans, extent_root, 0, type,
                                     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                     start, *num_bytes);
        BUG_ON(ret);

        index = 0;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                dev_offset = map->stripes[index].physical;

                ret = btrfs_alloc_dev_extent(trans, device,
                                info->chunk_root->root_key.objectid,
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                                start, dev_offset, calc_size);
                BUG_ON(ret);
                index++;
        }
        return 0;
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
                                struct btrfs_root *extent_root,
                                struct map_lookup *map, u64 chunk_offset,
                                u64 chunk_size, u64 stripe_size)
{
        u64 dev_offset;
        struct btrfs_key key;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        struct btrfs_device *device;
        struct btrfs_chunk *chunk;
        struct btrfs_stripe *stripe;
        size_t item_size = btrfs_chunk_item_size(map->num_stripes);
        int index = 0;
        int ret;

        chunk = kzalloc(item_size, GFP_NOFS);
        if (!chunk)
                return -ENOMEM;

        index = 0;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                device->bytes_used += stripe_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);
                index++;
        }

        index = 0;
        stripe = &chunk->stripe;
        while (index < map->num_stripes) {
                device = map->stripes[index].dev;
                dev_offset = map->stripes[index].physical;

                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
                stripe++;
                index++;
        }

        btrfs_set_stack_chunk_length(chunk, chunk_size);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
        btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
        btrfs_set_stack_chunk_type(chunk, map->type);
        btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
        btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
        btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
        btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.type = BTRFS_CHUNK_ITEM_KEY;
        key.offset = chunk_offset;

        ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
        BUG_ON(ret);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
                                             item_size);
                BUG_ON(ret);
        }
        kfree(chunk);
        return 0;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the work
 * that requires modifying the chunk tree. This division is important for
 * the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                      struct btrfs_root *extent_root, u64 type)
{
        u64 chunk_offset;
        u64 chunk_size;
        u64 stripe_size;
        struct map_lookup *map;
        struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
        int ret;

        ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
                              &chunk_offset);
        if (ret)
                return ret;

        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                                  &stripe_size, chunk_offset, type);
        if (ret)
                return ret;

        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
                                   chunk_size, stripe_size);
        BUG_ON(ret);
        return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         struct btrfs_device *device)
{
        u64 chunk_offset;
        u64 sys_chunk_offset;
        u64 chunk_size;
        u64 sys_chunk_size;
        u64 stripe_size;
        u64 sys_stripe_size;
        u64 alloc_profile;
        struct map_lookup *map;
        struct map_lookup *sys_map;
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *extent_root = fs_info->extent_root;
        int ret;

        ret = find_next_chunk(fs_info->chunk_root,
                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
        BUG_ON(ret);

        alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
                        (fs_info->metadata_alloc_profile &
                         fs_info->avail_metadata_alloc_bits);
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
                                  &stripe_size, chunk_offset, alloc_profile);
        BUG_ON(ret);

        sys_chunk_offset = chunk_offset + chunk_size;

        alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
                        (fs_info->system_alloc_profile &
                         fs_info->avail_system_alloc_bits);
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
                                  &sys_chunk_size, &sys_stripe_size,
                                  sys_chunk_offset, alloc_profile);
        BUG_ON(ret);

        ret = btrfs_add_device(trans, fs_info->chunk_root, device);
        BUG_ON(ret);

        /*
         * Modifying the chunk tree requires allocating new blocks from
         * both the system block group and the metadata block group, so
         * we can only do operations that modify the chunk tree after
         * both block groups have been created.
         */
        ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
                                   chunk_size, stripe_size);
        BUG_ON(ret);

        ret = __finish_chunk_alloc(trans, extent_root, sys_map,
                                   sys_chunk_offset, sys_chunk_size,
                                   sys_stripe_size);
        BUG_ON(ret);
        return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
        struct extent_map *em;
        struct map_lookup *map;
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        int readonly = 0;
        int i;

        spin_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
        spin_unlock(&map_tree->map_tree.lock);
        if (!em)
                return 1;

        map = (struct map_lookup *)em->bdev;
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
                        readonly = 1;
                        break;
                }
        }
        free_extent_map(em);
        return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
        extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
        struct extent_map *em;

        while (1) {
                spin_lock(&tree->map_tree.lock);
                em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
                if (em)
                        remove_extent_mapping(&tree->map_tree, em);
                spin_unlock(&tree->map_tree.lock);
                if (!em)
                        break;
                kfree(em->bdev);
                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
        struct extent_map *em;
        struct map_lookup *map;
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        int ret;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, len);
        spin_unlock(&em_tree->lock);
        BUG_ON(!em);

        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
        if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
                ret = map->num_stripes;
        else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
                ret = map->sub_stripes;
        else
                ret = 1;
        free_extent_map(em);
        return ret;
}

static int find_live_mirror(struct map_lookup *map, int first, int num,
                            int optimal)
{
        int i;
        if (map->stripes[optimal].dev->bdev)
                return optimal;
        for (i = first; i < first + num; i++) {
                if (map->stripes[i].dev->bdev)
                        return i;
        }
        /* we couldn't find one that doesn't fail.  Just return something
         * and the io error handling code will clean up eventually
         */
        return optimal;
}

static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_multi_bio **multi_ret,
                             int mirror_num, struct page *unplug_page)
{
        struct extent_map *em;
        struct map_lookup *map;
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        u64 offset;
        u64 stripe_offset;
        u64 stripe_nr;
        int stripes_allocated = 8;
        int stripes_required = 1;
        int stripe_index;
        int i;
        int num_stripes;
        int max_errors = 0;
        struct btrfs_multi_bio *multi = NULL;

        if (multi_ret && !(rw & (1 << BIO_RW)))
                stripes_allocated = 1;
again:
        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
                                GFP_NOFS);
                if (!multi)
                        return -ENOMEM;

                atomic_set(&multi->error, 0);
        }

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
        spin_unlock(&em_tree->lock);

        if (!em && unplug_page)
                return 0;

        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
                       (unsigned long long)logical,
                       (unsigned long long)*length);
                BUG();
        }

        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
        offset = logical - em->start;

        if (mirror_num > map->num_stripes)
                mirror_num = 0;

        /* if our multi bio struct is too small, back off and try again */
        if (rw & (1 << BIO_RW)) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
                        max_errors = 1;
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripes_required = map->sub_stripes;
                        max_errors = 1;
                }
        }
        if (multi_ret && rw == WRITE &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                free_extent_map(em);
                kfree(multi);
                goto again;
        }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
        do_div(stripe_nr, map->stripe_len);

        stripe_offset = stripe_nr * map->stripe_len;
        BUG_ON(offset < stripe_offset);

        /* stripe_offset is the offset of this block in its stripe */
        stripe_offset = offset - stripe_offset;
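
        /*
         * e.g. with a 64K stripe_len, an offset of 300K within the chunk
         * gives stripe_nr 4 and stripe_offset 44K
         */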
        if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
                         BTRFS_BLOCK_GROUP_RAID10 |
                         BTRFS_BLOCK_GROUP_DUP)) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, em->len - offset,
                                map->stripe_len - stripe_offset);
        } else {
                *length = em->len - offset;
        }

        if (!multi_ret && !unplug_page)
                goto out;

        num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                if (unplug_page || (rw & (1 << BIO_RW)))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
                else {
                        stripe_index = find_live_mirror(map, 0,
                                            map->num_stripes,
                                            current->pid % map->num_stripes);
                }

        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                if (rw & (1 << BIO_RW))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;

        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                int factor = map->num_stripes / map->sub_stripes;

                stripe_index = do_div(stripe_nr, factor);
                stripe_index *= map->sub_stripes;

                if (unplug_page || (rw & (1 << BIO_RW)))
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
                else {
                        stripe_index = find_live_mirror(map, stripe_index,
                                              map->sub_stripes, stripe_index +
                                              current->pid % map->sub_stripes);
                }
        } else {
                /*
                 * after this do_div call, stripe_nr is the number of stripes
                 * on this device we have to walk to find the data, and
                 * stripe_index is the number of our device in the stripe array
                 */
2561 stripe_index = do_div(stripe_nr, map->num_stripes);
2563 BUG_ON(stripe_index >= map->num_stripes);
2565 for (i = 0; i < num_stripes; i++) {
2567 struct btrfs_device *device;
2568 struct backing_dev_info *bdi;
2570 device = map->stripes[stripe_index].dev;
2572 bdi = blk_get_backing_dev_info(device->bdev);
2573 if (bdi->unplug_io_fn)
2574 bdi->unplug_io_fn(bdi, unplug_page);
2577 multi->stripes[i].physical =
2578 map->stripes[stripe_index].physical +
2579 stripe_offset + stripe_nr * map->stripe_len;
2580 multi->stripes[i].dev = map->stripes[stripe_index].dev;
2586 multi->num_stripes = num_stripes;
2587 multi->max_errors = max_errors;
2590 free_extent_map(em);
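/*
 * Worked example of the stripe math above (illustrative numbers): with
 * stripe_len == 64K on a two-device RAID0 chunk, a logical address 200K
 * past em->start gives stripe_nr = 200K / 64K = 3 and
 * stripe_offset = 200K - 3 * 64K = 8K.  do_div(stripe_nr, 2) then
 * leaves stripe_nr == 1 with remainder stripe_index == 1, so the block
 * lives at 1 * 64K + 8K into stripe array entry 1, and *length is
 * clamped to 64K - 8K = 56K so the bio never crosses a stripe boundary.
 */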
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	/*
	 * note: as written this verification loop never executes, since
	 * i > nr is false on entry for any nr >= 0.  It is kept only as a
	 * disabled sanity check of the forward mapping.
	 */
	for (i = 0; i > nr; i++) {
		struct btrfs_multi_bio *multi;
		struct btrfs_bio_stripe *stripe;
		int ret;

		length = 1;
		ret = btrfs_map_block(map_tree, WRITE, buf[i],
				      &length, &multi, 0);
		BUG_ON(ret);

		stripe = multi->stripes;
		for (j = 0; j < multi->num_stripes; j++) {
			if (stripe->physical >= physical &&
			    physical < stripe->physical + length)
				break;
		}
		BUG_ON(j >= multi->num_stripes);
		kfree(multi);
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
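/*
 * Reverse of the forward-mapping example (illustrative numbers): for a
 * two-device RAID0 chunk with stripe_len == 64K, a physical address 8K
 * into the second device's first stripe gives stripe_nr = 8K / 64K = 0,
 * then stripe_nr * 2 + 1 == 1, so the returned logical address is
 * chunk_start + 64K -- the stripe-aligned start, not the exact byte.
 */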
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
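/*
 * Example of the error tolerance above: a write to a two-mirror RAID1
 * chunk has num_stripes == 2 and max_errors == 1 (set in
 * __btrfs_map_block).  If exactly one mirror fails, multi->error ends
 * up at 1, which is not greater than max_errors, so the original bio is
 * still marked BIO_UPTODATE and completes without error.
 */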
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
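/*
 * Example of the queueing above: pending bios form a singly linked list
 * threaded through bi_next.  The first queued bio becomes both
 * device->pending_bios and device->pending_bio_tail; later bios are
 * appended at the tail.  When the worker has already flagged
 * running_pending, btrfs_queue_worker() is skipped because that worker
 * will pick the new bio up on its next pass through the list.
 */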
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		BUG_ON(rw == WRITE && !dev->writeable);
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
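/*
 * Example of the fan-out above: a write that maps to a two-mirror RAID1
 * chunk comes back with total_devs == 2.  The first stripe gets a clone
 * of first_bio, the last stripe reuses first_bio itself, and
 * stripes_pending starts at 2, so end_bio_multi_stripe() only completes
 * the original bio once both mirrors have finished.
 */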
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
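/*
 * Example of the mapping built above (illustrative values): a RAID1
 * chunk item with num_stripes == 2 produces one extent_map covering
 * [logical, logical + length), with the map_lookup pointer stashed in
 * em->bdev and stripes[0] / stripes[1] naming the two devices and
 * physical offsets that hold the mirrored copies.
 */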
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	ret = 0;
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
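/*
 * The sys_chunk_array parsed above is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk) pairs.  For example, a
 * single system chunk with one stripe advances cur by
 * sizeof(struct btrfs_disk_key) + btrfs_chunk_item_size(1), and the
 * loop terminates once cur reaches array_size.
 */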
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}