/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
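/*
 * Every filesystem seen by device scanning gets a btrfs_fs_devices
 * entry on the global fs_uuids list.  uuid_mutex protects that list
 * and serializes the scan/open/close paths below.
 */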
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		rcu_string_free(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}
static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}
void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}
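/*
 * Walk one fs_devices list and return the device matching devid (and,
 * when a uuid is supplied, the device uuid as well).  The caller must
 * keep the list stable, typically by holding uuid_mutex or the
 * fs_devices device_list_mutex.
 */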
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		printk(KERN_INFO "btrfs: open %s failed\n", device_path);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, 4096);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (!*bh) {
		ret = -EINVAL;
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
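/*
 * Called for every super block we scan.  Three outcomes are possible:
 * an unknown fsid registers a brand new fs_devices, an unknown devid is
 * added as a new device to a known filesystem, and a known device that
 * reappears under a different path just gets its recorded name updated.
 */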
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}

		device->devid = devid;
		device->dev_stats_valid = 0;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			kfree(device);
			return -ENOMEM;
		}
		rcu_assign_pointer(device->name, name);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name->str, path)) {
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	fs_devices->total_devices = orig->total_devices;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
		if (!name) {
			kfree(device);
			goto error;
		}
		rcu_assign_pointer(device->name, name);

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
			       struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!device->is_tgtdev_for_dev_replace &&
			    (!latest_transid ||
			     device->generation > latest_transid)) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || device->is_tgtdev_for_dev_replace) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			if (!device->is_tgtdev_for_dev_replace)
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		rcu_string_free(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}
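/*
 * Device teardown happens in two stages: free_device() runs as an RCU
 * callback in atomic context, so it only schedules __free_device() on a
 * workqueue, where blkdev_put() and the final kfree() may safely sleep.
 */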
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	rcu_string_free(device->name);
	kfree(device);
}
static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;
		struct rcu_string *name;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));

		/* Safe because we are under uuid_mutex */
		if (device->name) {
			name = rcu_string_strdup(device->name->str, GFP_NOFS);
			BUG_ON(device->name && !name); /* -ENOMEM */
			rcu_assign_pointer(new_device->name, name);
		}
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		spin_lock_init(&new_device->io_lock);
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}
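/*
 * Open all devices on the list that we can, validating each against the
 * devid and uuid recorded in its super block, and track the device with
 * the newest generation as latest_bdev.  If every opened device is
 * marked as a seed, the whole fs_devices ends up flagged as seeding.
 */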
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		/* Just open everything we can; ignore failures here */
		if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
					  &bdev, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable && !device->is_tgtdev_for_dev_replace) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
		blkdev_put(bdev, flags);
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct page *page;
	void *p;
	int ret = -EINVAL;
	u64 devid;
	u64 transid;
	u64 total_devices;
	u64 bytenr;
	pgoff_t index;

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;
	mutex_lock(&uuid_mutex);

	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	/* make sure our super fits in the device */
	if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
		goto error_bdev_put;

	/* make sure our super fits in the page */
	if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
		goto error_bdev_put;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_CACHE_SHIFT;
	if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
		goto error_bdev_put;

	/* pull in the page with our super */
	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_NOFS);

	if (IS_ERR_OR_NULL(page))
		goto error_bdev_put;

	p = kmap(page);

	/* align our pointer to the offset of the super block */
	disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

	if (btrfs_super_bytenr(disk_super) != bytenr ||
	    disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
		goto error_unmap;

	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	total_devices = btrfs_super_num_devices(disk_super);

	if (disk_super->label[0]) {
		if (disk_super->label[BTRFS_LABEL_SIZE - 1])
			disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
		printk(KERN_INFO "device label %s ", disk_super->label);
	} else {
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	}

	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);

	ret = device_list_add(path, disk_super, devid, fs_devices_ret);
	if (!ret && fs_devices_ret)
		(*fs_devices_ret)->total_devices = total_devices;

error_unmap:
	kunmap(page);
	page_cache_release(page);

error_bdev_put:
	blkdev_put(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
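/*
 * Chunks created earlier in the running transaction are not yet visible
 * in the dev extent tree searched above.  Check the transaction's
 * pending chunks and, when one of its stripes overlaps
 * [*start, *start + len), push *start past that stripe so the caller
 * can retry from there.
 */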
static int contains_pending_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct extent_map *em;
	int ret = 0;

	list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
		struct map_lookup *map;
		int i;

		map = (struct map_lookup *)em->bdev;
		for (i = 0; i < map->num_stripes; i++) {
			if (map->stripes[i].dev != device)
				continue;
			if (map->stripes[i].physical >= *start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    *start)
				continue;
			*start = map->stripes[i].physical +
				 em->orig_block_len;
			ret = 1;
		}
	}

	return ret;
}
/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size
 *		of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find. But if we
 * don't find suitable free space, it will be used to store the start position
 * of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = 2;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(trans, device,
						    &search_start,
						    hole_size))
				hole_size = 0;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	if (contains_pending_extent(trans, device, &search_start, hole_size)) {
		btrfs_release_path(path);
		goto again;
	}

	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_tree, u64 chunk_objectid,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	WARN_ON(device->is_tgtdev_for_dev_replace);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}
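/*
 * Devids are handed out by finding the highest existing DEV_ITEM key in
 * the chunk tree and adding one; holes left behind by removed devices
 * are not reused.
 */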
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_device(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	unsigned seq;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		all_avail = root->fs_info->avail_data_alloc_bits |
			    root->fs_info->avail_system_alloc_bits |
			    root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	num_devices = root->fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&root->fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
		WARN_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
		ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
		goto out;
	}
	if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
	    root->fs_info->fs_devices->rw_devices <= 3) {
		ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata &&
			    !tmp->is_tgtdev_for_dev_replace &&
			    !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
			goto out;
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
			goto out;
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto error_brelse;
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root->fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;
	device->fs_devices->total_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = cur_devices->seed;
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super && disk_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

	/* Notify udev that device has changed */
	if (bdev)
		btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

error_brelse:
	brelse(bh);
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}
void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
				 struct btrfs_device *srcdev)
{
	WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
	list_del_rcu(&srcdev->dev_list);
	list_del_rcu(&srcdev->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	if (srcdev->missing) {
		fs_info->fs_devices->missing_devices--;
		fs_info->fs_devices->rw_devices++;
	}
	if (srcdev->can_discard)
		fs_info->fs_devices->num_can_discard--;
	if (srcdev->bdev)
		fs_info->fs_devices->open_devices--;

	call_rcu(&srcdev->rcu, free_device);
}
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *tgtdev)
{
	struct btrfs_device *next_device;

	WARN_ON(!tgtdev);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	if (tgtdev->bdev) {
		btrfs_scratch_superblock(tgtdev);
		fs_info->fs_devices->open_devices--;
	}
	fs_info->fs_devices->num_devices--;
	if (tgtdev->can_discard)
		fs_info->fs_devices->num_can_discard++;

	next_device = list_entry(fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (tgtdev->bdev == fs_info->sb->s_bdev)
		fs_info->sb->s_bdev = next_device->bdev;
	if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
	list_del_rcu(&tgtdev->dev_list);

	call_rcu(&tgtdev->rcu, free_device);

	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
}
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
				     struct btrfs_device **device)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;

	*device = NULL;
	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    root->fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ret;
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
				    disk_super->fsid);
	brelse(bh);
	if (!*device)
		ret = -ENOENT;
	blkdev_put(bdev, FMODE_READ);
	return ret;
}
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
					 char *device_path,
					 struct btrfs_device **device)
{
	*device = NULL;
	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held by the caller.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				*device = tmp;
				break;
			}
		}

		if (!*device) {
			pr_err("btrfs: no missing device found\n");
			return -ENOENT;
		}

		return 0;
	} else {
		return btrfs_find_device_by_path(root, device_path, device);
	}
}
/*
 * does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			     synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->total_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
					   fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	struct rcu_string *name;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  root->fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &root->fs_info->fs_devices->devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&root->fs_info->fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		rcu_string_free(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		rcu_string_free(device->name);
		kfree(device);
		ret = PTR_ERR(trans);
		goto error;
	}

	lock_chunks(root);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 0;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(root);
		BUG_ON(ret); /* -ENOMEM */
	}

	device->fs_devices = root->fs_info->fs_devices;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_devices++;
	if (device->can_discard)
		root->fs_info->fs_devices->num_can_discard++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space += device->total_bytes;
	spin_unlock(&root->fs_info->free_chunk_lock);

	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
		root->fs_info->fs_devices->rotating = 1;

	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	btrfs_set_super_total_bytes(root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
	btrfs_set_super_num_devices(root->fs_info->super_copy,
				    total_bytes + 1);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
		ret = btrfs_finish_sprout(trans, root);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	} else {
		ret = btrfs_add_device(trans, root, device);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			goto error_trans;
		}
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	root->fs_info->num_tolerated_disk_barrier_failures =
		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
	ret = btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(root);
		if (ret < 0)
			btrfs_error(root->fs_info, ret,
				    "Failed to relocate sys chunks after "
				    "device initialization. This can be fixed "
				    "using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			return PTR_ERR(trans);
		}
		ret = btrfs_commit_transaction(trans, root);
	}

	return ret;

error_trans:
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	rcu_string_free(device->name);
	kfree(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
				  struct btrfs_device **device_out)
{
	struct request_queue *q;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *devices;
	struct rcu_string *name;
	int ret = 0;

	*device_out = NULL;
	if (fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		ret = -ENOMEM;
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_NOFS);
	if (!name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}
	rcu_assign_pointer(device->name, name);

	q = bdev_get_queue(bdev);
	if (blk_queue_discard(q))
		device->can_discard = 1;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	device->devid = BTRFS_DEV_REPLACE_DEVID;
	spin_lock_init(&device->io_lock);
	device->generation = 0;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->disk_total_bytes = device->total_bytes;
	device->dev_root = fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;
	device->is_tgtdev_for_dev_replace = 1;
	device->mode = FMODE_EXCL;
	set_blocksize(device->bdev, 4096);
	device->fs_devices = fs_info->fs_devices;
	list_add(&device->dev_list, &fs_info->fs_devices->devices);
	fs_info->fs_devices->num_devices++;
	fs_info->fs_devices->open_devices++;
	if (device->can_discard)
		fs_info->fs_devices->num_can_discard++;
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	*device_out = device;
	return ret;

error:
	blkdev_put(bdev, FMODE_EXCL);
	return ret;
}
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
					      struct btrfs_device *tgtdev)
{
	WARN_ON(fs_info->fs_devices->rw_devices == 0);
	tgtdev->io_width = fs_info->dev_root->sectorsize;
	tgtdev->io_align = fs_info->dev_root->sectorsize;
	tgtdev->sector_size = fs_info->dev_root->sectorsize;
	tgtdev->dev_root = fs_info->dev_root;
	tgtdev->in_fs_metadata = 1;
}
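/*
 * Sync the in-memory device state (sizes and io geometry) back into the
 * matching DEV_ITEM in the chunk tree.  total_bytes is written from
 * disk_total_bytes, which can differ from total_bytes while a shrink is
 * in flight.
 */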
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
			       struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes ||
	    device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	device->disk_total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_error(root->fs_info, -ENOENT,
			    "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_error(root->fs_info, ret,
			    "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
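/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs:
 *
 *	sys_chunk_array: | key0 | chunk0 | key1 | chunk1 | ...
 *
 * Deleting an entry means walking the pairs, memmove()ing the tail of
 * the array over the matching entry and shrinking the recorded array
 * size.
 */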
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
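/*
 * Relocating a chunk is a two step operation: first move all live
 * extents out of the chunk via btrfs_relocate_block_group(), then tear
 * down the now-empty chunk (dev extents, chunk item, sys_chunk_array
 * entry, block group and extent mapping) in a single transaction.
 */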
static int btrfs_relocate_chunk(struct btrfs_root *root,
				u64 chunk_tree, u64 chunk_objectid,
				u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	ret = btrfs_can_relocate(extent_root, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	if (ret)
		return ret;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_std_error(root->fs_info, ret);
		return ret;
	}

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	BUG_ON(!em || em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);

	BUG_ON(ret);

	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else if (ret)
				BUG();
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		WARN_ON(1);
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}

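/*
 * A note on the search idiom above (added commentary): chunk items all
 * live under objectid BTRFS_FIRST_CHUNK_TREE_OBJECTID and are keyed by
 * their logical start offset.  Starting from key.offset == (u64)-1 and
 * calling btrfs_previous_item() walks the chunk tree from the highest
 * offset downwards, which is why the loop terminates when
 * found_key.offset == 0 and otherwise continues the scan with
 * key.offset = found_key.offset - 1.
 */
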
static int insert_balance_item(struct btrfs_root *root,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

static int del_balance_item(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans, root);
	if (err && !ret)
		ret = err;
	return ret;
}

/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used.  The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full.  Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}

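/*
 * Worked example (illustrative, not from the original source): suppose
 * a convert balance from RAID0 to RAID1 was interrupted halfway.  On
 * resume, CONVERT implies SOFT, so chunks that already carry the RAID1
 * target profile are skipped, and the implied usage=90 filter skips
 * freshly relocated (hence well-packed) chunks, leaving mostly the
 * untouched remainder to be processed again.
 */
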
/*
 * Should be called with both balance and volume mutexes held to
 * serialize other volume operations (add_dev/rm_dev/resize) with
 * restriper.  Same goes for unset_balance_control.
 */
static void set_balance_control(struct btrfs_balance_control *bctl)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;

	BUG_ON(fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
}

static void unset_balance_control(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
}

/*
 * Balance filters.  Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}

static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}

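/*
 * Worked example (illustrative, not from the original source): with
 * bargs->usage == 90 and a 1 GiB chunk (cache->key.offset == 1073741824),
 * div_factor_fine() yields user_thresh = 1073741824 * 90 / 100, roughly
 * 966 MiB.  A chunk with 512 MiB used is below the threshold, the filter
 * returns 0 and the chunk stays eligible for balancing; a chunk with
 * 1000 MiB used would be filtered out as already reasonably full.
 */
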
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}

/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		do_div(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}

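/*
 * Worked example (illustrative): for a RAID10 chunk with num_stripes == 4,
 * factor == 2, so each device holds chunk_length / 2 bytes of the chunk.
 * With a 2 GiB chunk, a stripe starting at physical offset X covers
 * [X, X + 1 GiB) on its device, and the chunk survives the filter iff
 * that range intersects the requested [pstart, pend) on a stripe whose
 * devid matches bargs->devid.
 */
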
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}

static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}

static int should_balance_chunk(struct btrfs_root *root,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	return 1;
}

static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct list_head *devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;

	/* step one make some room on all the devices */
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		if (ret == -ENOSPC)
			break;
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 0);
		BUG_ON(IS_ERR(trans));

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;

		/* chunk zero is special */
		if (found_key.offset == 0)
			break;

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(chunk_root, leaf, chunk,
					   found_key.offset);
		btrfs_release_path(path);
		if (!ret)
			goto loop;

		if (counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);
			goto loop;
		}

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		if (ret && ret != -ENOSPC)
			goto error;
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
		       enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}

/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return (flags & (flags - 1)) == 0;
}

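/*
 * Worked example (illustrative): "(flags & (flags - 1)) == 0" is the
 * standard single-bit (power-of-two) test.  With only the RAID1 profile
 * bit set, flags - 1 clears that bit and the AND is zero, so the profile
 * is reduced; with RAID1|RAID10 two bits are set, the AND is non-zero
 * and the profile is rejected as not reduced.
 */
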
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}

static void __cancel_balance(struct btrfs_fs_info *fs_info)
{
	int ret;

	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	if (ret)
		btrfs_std_error(fs_info, ret);

	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
}

void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);

/*
 * Should be called with both balance and volume mutexes held
 */
int btrfs_balance(struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	struct btrfs_fs_info *fs_info = bctl->fs_info;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			printk(KERN_ERR "btrfs: with mixed groups data and "
			       "metadata balance options must be the same\n");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = fs_info->fs_devices->num_devices;
	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		BUG_ON(num_devices < 1);
		num_devices--;
	}
	btrfs_dev_replace_unlock(&fs_info->dev_replace);
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	if (num_devices == 1)
		allowed |= BTRFS_BLOCK_GROUP_DUP;
	else if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->data.target, 1) ||
	     (bctl->data.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "data profile %llu\n",
		       (unsigned long long)bctl->data.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->meta.target, 1) ||
	     (bctl->meta.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "metadata profile %llu\n",
		       (unsigned long long)bctl->meta.target);
		ret = -EINVAL;
		goto out;
	}
	if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (!alloc_profile_is_valid(bctl->sys.target, 1) ||
	     (bctl->sys.target & ~allowed))) {
		printk(KERN_ERR "btrfs: unable to start balance with target "
		       "system profile %llu\n",
		       (unsigned long long)bctl->sys.target);
		ret = -EINVAL;
		goto out;
	}

	/* allow dup'ed data chunks only in mixed mode */
	if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
	    (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
		printk(KERN_ERR "btrfs: dup for data is not allowed\n");
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed))) {
			if (bctl->flags & BTRFS_BALANCE_FORCE) {
				printk(KERN_INFO "btrfs: force reducing metadata "
				       "integrity\n");
			} else {
				printk(KERN_ERR "btrfs: balance will reduce metadata "
				       "integrity, use force if you want this\n");
				ret = -EINVAL;
				goto out;
			}
		}
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		int num_tolerated_disk_barrier_failures;
		u64 target = bctl->sys.target;

		num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
		if (num_tolerated_disk_barrier_failures > 0 &&
		    (target &
		     (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
			num_tolerated_disk_barrier_failures = 0;
		else if (num_tolerated_disk_barrier_failures > 1 &&
			 (target &
			  (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
			num_tolerated_disk_barrier_failures = 1;

		fs_info->num_tolerated_disk_barrier_failures =
			num_tolerated_disk_barrier_failures;
	}

	ret = insert_balance_item(fs_info->tree_root, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		set_balance_control(bctl);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	atomic_inc(&fs_info->balance_running);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	atomic_dec(&fs_info->balance_running);

	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		fs_info->num_tolerated_disk_barrier_failures =
			btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
	}

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		update_ioctl_balance_args(fs_info, 0, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		__cancel_balance(fs_info);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
	else {
		kfree(bctl);
		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	}
	return ret;
}

static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	if (fs_info->balance_ctl) {
		printk(KERN_INFO "btrfs: continuing balance\n");
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);

	return ret;
}

int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	spin_lock(&fs_info->balance_lock);
	if (!fs_info->balance_ctl) {
		spin_unlock(&fs_info->balance_lock);
		return 0;
	}
	spin_unlock(&fs_info->balance_lock);

	if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
		printk(KERN_INFO "btrfs: force skipping balance\n");
		return 0;
	}

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_RET(tsk);
}

int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_BALANCE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->fs_info = fs_info;
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));

	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);

	set_balance_control(bctl);

	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (atomic_read(&fs_info->balance_running)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(atomic_read(&fs_info->balance_running));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}

int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (atomic_read(&fs_info->balance_running)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   atomic_read(&fs_info->balance_running) == 0);
		mutex_lock(&fs_info->balance_mutex);
	} else {
		/* __cancel_balance needs volume_mutex */
		mutex_unlock(&fs_info->balance_mutex);
		mutex_lock(&fs_info->volume_mutex);
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl)
			__cancel_balance(fs_info);

		mutex_unlock(&fs_info->volume_mutex);
	}

	BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent.
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = device->total_bytes;
	u64 diff = device->total_bytes - new_size;

	if (device->is_tgtdev_for_dev_replace)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable) {
		device->fs_devices->total_rw_bytes -= diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space -= diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	unlock_chunks(root);

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			btrfs_release_path(path);
			break;
		}

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret && ret != -ENOSPC)
			goto done;
		if (ret == -ENOSPC)
			failed++;
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		lock_chunks(root);

		device->total_bytes = old_size;
		if (device->writeable)
			device->fs_devices->total_rw_bytes += diff;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += diff;
		spin_unlock(&root->fs_info->free_chunk_lock);
		unlock_chunks(root);
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	lock_chunks(root);

	device->disk_total_bytes = new_size;
	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
done:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_add_system_chunk(struct btrfs_root *root,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

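/*
 * Illustrative layout note (added commentary): each entry appended to
 * sys_chunk_array is a btrfs_disk_key immediately followed by the chunk
 * item.  For a single-stripe system chunk:
 *
 *   offset 0           sizeof(disk_key)
 *   | btrfs_disk_key   | btrfs_chunk incl. one btrfs_stripe |
 *
 * which is why item_size is bumped by sizeof(disk_key) before the
 * recorded array size is updated.
 */
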
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}

static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes = 2, .dev_stripes = 1,
		.devs_max = 0, /* 0 == as many as possible */
		.devs_min = 4, .devs_increment = 2, .ncopies = 2,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes = 1, .dev_stripes = 1, .devs_max = 2,
		.devs_min = 2, .devs_increment = 2, .ncopies = 2,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes = 1, .dev_stripes = 2, .devs_max = 1,
		.devs_min = 1, .devs_increment = 1, .ncopies = 2,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes = 1, .dev_stripes = 1, .devs_max = 0,
		.devs_min = 2, .devs_increment = 1, .ncopies = 1,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes = 1, .dev_stripes = 1, .devs_max = 1,
		.devs_min = 1, .devs_increment = 1, .ncopies = 1,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes = 1, .dev_stripes = 1, .devs_max = 0,
		.devs_min = 2, .devs_increment = 1, .ncopies = 2,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes = 1, .dev_stripes = 1, .devs_max = 0,
		.devs_min = 3, .devs_increment = 1, .ncopies = 3,
	},
};

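/*
 * Reading the table (added commentary), e.g. RAID10: chunks are built
 * from at least devs_min == 4 devices, the device count is rounded down
 * to a multiple of devs_increment == 2, every logical block has
 * ncopies == 2 copies, and devs_max == 0 places no upper bound.  DUP
 * instead puts dev_stripes == 2 stripes on a single device.
 */
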
static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
{
	/* TODO allow them to set a preferred stripe size */
	return 64 * 1024;
}

static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, u64 start,
			       u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies of the data there are */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 num_bytes;
	u64 raid_stripe_len = BTRFS_STRIPE_LEN;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	index = __get_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = 1024 * 1024 * 1024;
		max_chunk_size = 10 * max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
			max_stripe_size = 1024 * 1024 * 1024;
		else
			max_stripe_size = 256 * 1024 * 1024;
		max_chunk_size = max_stripe_size;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = 32 * 1024 * 1024;
		max_chunk_size = 2 * max_stripe_size;
	} else {
		printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
		       type);
		BUG_ON(1);
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	cur = fs_devices->alloc_list.next;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	while (cur != &fs_devices->alloc_list) {
		struct btrfs_device *device;
		u64 max_avail;
		u64 dev_offset;

		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		cur = cur->next;

		if (!device->writeable) {
			WARN(1, KERN_ERR
			     "btrfs: read-only device in alloc_list\n");
			continue;
		}

		if (!device->in_fs_metadata ||
		    device->is_tgtdev_for_dev_replace)
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
			continue;

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs -= ndevs % devs_increment;

	if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
		ret = -ENOSPC;
		goto error;
	}

	if (devs_max && ndevs > devs_max)
		ndevs = devs_max;
	/*
	 * the primary goal is to maximize the number of stripes, so use as many
	 * devices as possible, even if the stripes are not maximum sized.
	 */
	stripe_size = devices_info[ndevs-1].max_avail;
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = num_stripes / ncopies;

	if (type & BTRFS_BLOCK_GROUP_RAID5) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 1;
	}
	if (type & BTRFS_BLOCK_GROUP_RAID6) {
		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
				 btrfs_super_stripesize(info->super_copy));
		data_stripes = num_stripes - 2;
	}

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		u64 mask = (1ULL << 24) - 1;
		stripe_size = max_chunk_size;
		do_div(stripe_size, data_stripes);

		/* bump the answer up to a 16MB boundary */
		stripe_size = (stripe_size + mask) & ~mask;

		/* but don't go higher than the limits we found
		 * while searching for free extents
		 */
		if (stripe_size > devices_info[ndevs-1].max_avail)
			stripe_size = devices_info[ndevs-1].max_avail;
	}

	do_div(stripe_size, dev_stripes);

	/* align to BTRFS_STRIPE_LEN */
	do_div(stripe_size, raid_stripe_len);
	stripe_size *= raid_stripe_len;

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = raid_stripe_len;
	map->io_align = raid_stripe_len;
	map->io_width = raid_stripe_len;
	map->type = type;
	map->sub_stripes = sub_stripes;

	num_bytes = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = num_bytes;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (!ret) {
		list_add_tail(&em->list, &trans->transaction->pending_chunks);
		atomic_inc(&em->refs);
	}
	write_unlock(&em_tree->lock);
	if (ret) {
		free_extent_map(em);
		goto error;
	}

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, num_bytes);
	if (ret)
		goto error_del_extent;

	free_extent_map(em);
	check_raid56_incompat_flag(extent_root->fs_info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}

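/*
 * Worked sizing example (illustrative, not from the original source): a
 * data chunk (max_stripe_size = 1 GiB, max_chunk_size = 10 GiB) over
 * ndevs = 6 equal devices in RAID6 gives num_stripes = 6 and
 * data_stripes = 4.  If each device offers a 3 GiB hole, stripe_size
 * starts at 3 GiB, but 3 GiB * 4 > 10 GiB, so stripe_size is clamped to
 * 10 GiB / 4 = 2.5 GiB (already on a 16 MiB boundary and below the
 * per-device hole), then aligned to raid_stripe_len, yielding a chunk
 * of stripe_size * 4 = 10 GiB of logical address space.
 */
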
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     struct btrfs_root *extent_root,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(extent_root->fs_info, "unable to find logical "
			   "%Lu len %Lu", chunk_offset, chunk_size);
		return -EINVAL;
	}

	if (em->start != chunk_offset || em->len != chunk_size) {
		btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
			  " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
			  chunk_size, em->start, em->len);
		free_extent_map(em);
		return -EINVAL;
	}

	map = (struct map_lookup *)em->bdev;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		if (ret)
			goto out;
		ret = btrfs_alloc_dev_extent(trans, device,
					     chunk_root->root_key.objectid,
					     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
					     chunk_offset, dev_offset,
					     stripe_size);
		if (ret)
			goto out;
	}

	spin_lock(&extent_root->fs_info->free_chunk_lock);
	extent_root->fs_info->free_chunk_space -= (stripe_size *
						   map->num_stripes);
	spin_unlock(&extent_root->fs_info->free_chunk_lock);

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
					     item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}

/*
 * Chunk allocation falls into two parts.  The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree.  The second part does the
 * work that requires modifying the chunk tree.  This division is
 * important for the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;

	chunk_offset = find_next_chunk(extent_root->fs_info);
	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

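/*
 * Added commentary (an assumption about how this era of the code wires
 * things up, not stated in this file): btrfs_alloc_chunk() and
 * init_first_rw_device() below run only the first phase; the chunk tree
 * items are inserted later, once the chunk tree itself is writable, via
 * btrfs_finish_chunk_alloc() from the pending block group handling.
 */
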
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
				  alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(root->fs_info);
	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
				  alloc_profile);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out;
	}

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	return ret;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	read_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	if (btrfs_test_opt(root, DEGRADED)) {
		free_extent_map(em);
		return 0;
	}

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);

	/*
	 * We could return errors for these cases, but that could get ugly
	 * and we'd probably do the same thing anyway, which is just not do
	 * anything else and exit, so return 1 so the callers don't try to
	 * use other copies.
	 */
	if (!em) {
		btrfs_crit(fs_info, "No mapping for %Lu-%Lu\n", logical,
			    logical+len);
		return 1;
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
			    "%Lu-%Lu\n", logical, logical+len, em->start,
			    em->start + em->len);
		return 1;
	}

	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		ret = 3;
	else
		ret = 1;
	free_extent_map(em);

	btrfs_dev_replace_lock(&fs_info->dev_replace);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
		ret++;
	btrfs_dev_replace_unlock(&fs_info->dev_replace);

	return ret;
}

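/*
 * Illustrative example (added commentary): on a RAID10 filesystem every
 * block lives on sub_stripes == 2 devices, so btrfs_num_copies() reports
 * 2; RAID5 reports 2 (the data plus a parity reconstruction) and RAID6
 * reports 3 (data plus the P and Q reconstructions).  While a
 * dev-replace runs, one extra copy is reported because the target drive
 * mirrors the area left of the replace cursor.
 */
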
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
				    struct btrfs_mapping_tree *map_tree,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	unsigned long len = root->sectorsize;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6)) {
		len = map->stripe_len * nr_data_stripes(map);
	}
	free_extent_map(em);
	return len;
}

int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
			   u64 logical, u64 len, int mirror_num)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret = 0;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	read_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_RAID6))
		ret = 1;
	free_extent_map(em);

	return ret;
}

static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first, int num,
			    int optimal, int dev_replace_is_ongoing)
{
	int i;
	int tolerance;
	struct btrfs_device *srcdev;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[optimal].dev->bdev &&
		    (tolerance || map->stripes[optimal].dev != srcdev))
			return optimal;
		for (i = first; i < first + num; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < bbio->num_stripes - 1; i++) {
			if (parity_smaller(raid_map[i], raid_map[i+1])) {
				s = bbio->stripes[i];
				l = raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				raid_map[i] = raid_map[i+1];
				bbio->stripes[i+1] = s;
				raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}

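/*
 * Worked example (illustrative): for a 4-device RAID5 full-stripe write,
 * raid_map initially holds the three data stripe logical addresses plus
 * the RAID5_P_STRIPE marker in rotated order, e.g.
 *   { P, data0, data1, data2 }
 * Since the parity markers are huge sentinel values, the bubble sort
 * above orders the array ascending and pushes them (and RAID6's Q)
 * behind the data stripes:
 *   { data0, data1, data2, P }
 * so later code can index data stripes first and parity last.
 */
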
4344 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4345 u64 logical, u64 *length,
4346 struct btrfs_bio **bbio_ret,
4347 int mirror_num, u64 **raid_map_ret)
4349 struct extent_map *em;
4350 struct map_lookup *map;
4351 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4352 struct extent_map_tree *em_tree = &map_tree->map_tree;
4355 u64 stripe_end_offset;
4360 u64 *raid_map = NULL;
4366 struct btrfs_bio *bbio = NULL;
4367 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4368 int dev_replace_is_ongoing = 0;
4369 int num_alloc_stripes;
4370 int patch_the_first_stripe_for_dev_replace = 0;
4371 u64 physical_to_patch_in_first_stripe = 0;
4372 u64 raid56_full_stripe_start = (u64)-1;
4374 read_lock(&em_tree->lock);
4375 em = lookup_extent_mapping(em_tree, logical, *length);
4376 read_unlock(&em_tree->lock);
4379 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4380 (unsigned long long)logical,
4381 (unsigned long long)*length);
4385 if (em->start > logical || em->start + em->len < logical) {
4386 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4387 "found %Lu-%Lu\n", logical, em->start,
4388 em->start + em->len);
4392 map = (struct map_lookup *)em->bdev;
4393 offset = logical - em->start;
4395 stripe_len = map->stripe_len;
4398 * stripe_nr counts the total number of stripes we have to stride
4399 * to get to this block
4401 do_div(stripe_nr, stripe_len);
4403 stripe_offset = stripe_nr * stripe_len;
4404 BUG_ON(offset < stripe_offset);
4406 /* stripe_offset is the offset of this block in its stripe*/
4407 stripe_offset = offset - stripe_offset;
4409 /* if we're here for raid56, we need to know the stripe aligned start */
4410 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4411 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4412 raid56_full_stripe_start = offset;
4414 /* allow a write of a full stripe, but make sure we don't
4415 * allow straddling of stripes
4417 do_div(raid56_full_stripe_start, full_stripe_len);
4418 raid56_full_stripe_start *= full_stripe_len;
4421 if (rw & REQ_DISCARD) {
4422 /* we don't discard raid56 yet */
4424 (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4428 *length = min_t(u64, em->len - offset, *length);
4429 } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4431 /* For writes to RAID[56], allow a full stripeset across all disks.
4432 For other RAID types and for RAID[56] reads, just allow a single
4433 stripe (on a single disk). */
4434 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4436 max_len = stripe_len * nr_data_stripes(map) -
4437 (offset - raid56_full_stripe_start);
4439 /* we limit the length of each bio to what fits in a stripe */
4440 max_len = stripe_len - stripe_offset;
4442 *length = min_t(u64, em->len - offset, max_len);
4444 *length = em->len - offset;
4447 /* This is for when we're called from btrfs_merge_bio_hook() and all
4448 it cares about is the length */
4452 btrfs_dev_replace_lock(dev_replace);
4453 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4454 if (!dev_replace_is_ongoing)
4455 btrfs_dev_replace_unlock(dev_replace);
4457 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4458 !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4459 dev_replace->tgtdev != NULL) {
4461 * in dev-replace case, for repair case (that's the only
4462 * case where the mirror is selected explicitly when
4463 * calling btrfs_map_block), blocks left of the left cursor
4464 * can also be read from the target drive.
4465 * For REQ_GET_READ_MIRRORS, the target drive is added as
4466 * the last one to the array of stripes. For READ, it also
4467 * needs to be supported using the same mirror number.
4468 * If the requested block is not left of the left cursor,
4469 * EIO is returned. This can happen because btrfs_num_copies()
4470 * returns one more in the dev-replace case.
4472 u64 tmp_length = *length;
4473 struct btrfs_bio *tmp_bbio = NULL;
4474 int tmp_num_stripes;
4475 u64 srcdev_devid = dev_replace->srcdev->devid;
4476 int index_srcdev = 0;
4478 u64 physical_of_found = 0;
4480 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4481 logical, &tmp_length, &tmp_bbio, 0, NULL);
4483 WARN_ON(tmp_bbio != NULL);
4487 tmp_num_stripes = tmp_bbio->num_stripes;
4488 if (mirror_num > tmp_num_stripes) {
4490 * REQ_GET_READ_MIRRORS does not contain this
4491 * mirror, that means that the requested area
4492 * is not left of the left cursor
4500 * process the rest of the function using the mirror_num
4501 * of the source drive. Therefore look it up first.
4502 * At the end, patch the device pointer to the one of the
4505 for (i = 0; i < tmp_num_stripes; i++) {
4506 if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4508 * In case of DUP, in order to keep it
4509 * simple, only add the mirror with the
4510 * lowest physical address
4513 physical_of_found <=
4514 tmp_bbio->stripes[i].physical)
4519 tmp_bbio->stripes[i].physical;
4524 mirror_num = index_srcdev + 1;
4525 patch_the_first_stripe_for_dev_replace = 1;
4526 physical_to_patch_in_first_stripe = physical_of_found;
4535 } else if (mirror_num > map->num_stripes) {
4541 stripe_nr_orig = stripe_nr;
4542 stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4543 do_div(stripe_nr_end, map->stripe_len);
4544 stripe_end_offset = stripe_nr_end * map->stripe_len -
4547 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4548 if (rw & REQ_DISCARD)
4549 num_stripes = min_t(u64, map->num_stripes,
4550 stripe_nr_end - stripe_nr_orig);
4551 stripe_index = do_div(stripe_nr, map->num_stripes);
4552 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4553 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4554 num_stripes = map->num_stripes;
4555 else if (mirror_num)
4556 stripe_index = mirror_num - 1;
4558 stripe_index = find_live_mirror(fs_info, map, 0,
4560 current->pid % map->num_stripes,
4561 dev_replace_is_ongoing);
4562 mirror_num = stripe_index + 1;
4565 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4566 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4567 num_stripes = map->num_stripes;
4568 } else if (mirror_num) {
4569 stripe_index = mirror_num - 1;
4574 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4575 int factor = map->num_stripes / map->sub_stripes;
4577 stripe_index = do_div(stripe_nr, factor);
4578 stripe_index *= map->sub_stripes;
4580 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4581 num_stripes = map->sub_stripes;
4582 else if (rw & REQ_DISCARD)
4583 num_stripes = min_t(u64, map->sub_stripes *
4584 (stripe_nr_end - stripe_nr_orig),
4586 else if (mirror_num)
4587 stripe_index += mirror_num - 1;
4589 int old_stripe_index = stripe_index;
4590 stripe_index = find_live_mirror(fs_info, map,
4592 map->sub_stripes, stripe_index +
4593 current->pid % map->sub_stripes,
4594 dev_replace_is_ongoing);
4595 mirror_num = stripe_index - old_stripe_index + 1;
4598 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4599 BTRFS_BLOCK_GROUP_RAID6)) {
4602 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4606 /* push stripe_nr back to the start of the full stripe */
4607 stripe_nr = raid56_full_stripe_start;
4608 do_div(stripe_nr, stripe_len);
4610 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4612 /* RAID[56] write or recovery. Return all stripes */
4613 num_stripes = map->num_stripes;
4614 max_errors = nr_parity_stripes(map);
4616 raid_map = kmalloc(sizeof(u64) * num_stripes,
4623 /* Work out the disk rotation on this stripe-set */
4625 rot = do_div(tmp, num_stripes);
4627 /* Fill in the logical address of each stripe */
4628 tmp = stripe_nr * nr_data_stripes(map);
4629 for (i = 0; i < nr_data_stripes(map); i++)
4630 raid_map[(i+rot) % num_stripes] =
4631 em->start + (tmp + i) * map->stripe_len;
4633 raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4634 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4635 raid_map[(i+rot+1) % num_stripes] =
4638 *length = map->stripe_len;
4643 * Mirror #0 or #1 means the original data block.
4644 * Mirror #2 is RAID5 parity block.
4645 * Mirror #3 is RAID6 Q block.
4647 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4649 stripe_index = nr_data_stripes(map) +
4652 /* We distribute the parity blocks across stripes */
4653 tmp = stripe_nr + stripe_index;
4654 stripe_index = do_div(tmp, map->num_stripes);
4658 * after this do_div call, stripe_nr is the number of stripes
4659 * on this device we have to walk to find the data, and
4660 * stripe_index is the number of our device in the stripe array
4662 stripe_index = do_div(stripe_nr, map->num_stripes);
4663 mirror_num = stripe_index + 1;
4665 BUG_ON(stripe_index >= map->num_stripes);
4667 num_alloc_stripes = num_stripes;
4668 if (dev_replace_is_ongoing) {
4669 if (rw & (REQ_WRITE | REQ_DISCARD))
4670 num_alloc_stripes <<= 1;
4671 if (rw & REQ_GET_READ_MIRRORS)
4672 num_alloc_stripes++;
4674 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4679 atomic_set(&bbio->error, 0);
4681 if (rw & REQ_DISCARD) {
4683 int sub_stripes = 0;
4684 u64 stripes_per_dev = 0;
4685 u32 remaining_stripes = 0;
4686 u32 last_stripe = 0;
4689 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4690 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4693 sub_stripes = map->sub_stripes;
4695 factor = map->num_stripes / sub_stripes;
4696 stripes_per_dev = div_u64_rem(stripe_nr_end -
4699 &remaining_stripes);
4700 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4701 last_stripe *= sub_stripes;
4704 for (i = 0; i < num_stripes; i++) {
4705 bbio->stripes[i].physical =
4706 map->stripes[stripe_index].physical +
4707 stripe_offset + stripe_nr * map->stripe_len;
4708 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4710 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4711 BTRFS_BLOCK_GROUP_RAID10)) {
4712 bbio->stripes[i].length = stripes_per_dev *
4715 if (i / sub_stripes < remaining_stripes)
4716 bbio->stripes[i].length +=
4720 * Special for the first stripe and
4723 * |-------|...|-------|
4727 if (i < sub_stripes)
4728 bbio->stripes[i].length -=
4731 if (stripe_index >= last_stripe &&
4732 stripe_index <= (last_stripe +
4734 bbio->stripes[i].length -=
4737 if (i == sub_stripes - 1)
4740 bbio->stripes[i].length = *length;
4743 if (stripe_index == map->num_stripes) {
4744 /* This could only happen for RAID0/10 */
4750 for (i = 0; i < num_stripes; i++) {
4751 bbio->stripes[i].physical =
4752 map->stripes[stripe_index].physical +
4754 stripe_nr * map->stripe_len;
4755 bbio->stripes[i].dev =
4756 map->stripes[stripe_index].dev;
4761 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
4762 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
4763 BTRFS_BLOCK_GROUP_RAID10 |
4764 BTRFS_BLOCK_GROUP_RAID5 |
4765 BTRFS_BLOCK_GROUP_DUP)) {
4767 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
4772 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
4773 dev_replace->tgtdev != NULL) {
4774 int index_where_to_add;
4775 u64 srcdev_devid = dev_replace->srcdev->devid;
		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk
		 * to the new disk takes place at run time while the
		 * filesystem is mounted writable, the regular write
		 * operations to the old disk have to be duplicated to go
		 * to the new disk as well.
		 * Note that device->missing is handled by the caller, and
		 * that the write to the old disk is already set up in the
		 * stripes array.
		 */
		index_where_to_add = num_stripes;
4789 for (i = 0; i < num_stripes; i++) {
4790 if (bbio->stripes[i].dev->devid == srcdev_devid) {
4791 /* write to new disk, too */
4792 struct btrfs_bio_stripe *new =
4793 bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
4798 new->length = old->length;
4799 new->dev = dev_replace->tgtdev;
				index_where_to_add++;
			}
		}
		num_stripes = index_where_to_add;
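		/*
		 * Editor's note, illustrative example: a DUP write that
		 * produced two stripes, both on the source device, leaves
		 * this loop with four stripes: the two originals plus two
		 * copies that point at dev_replace->tgtdev at the same
		 * physical offsets.
		 */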
4805 } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
4806 dev_replace->tgtdev != NULL) {
4807 u64 srcdev_devid = dev_replace->srcdev->devid;
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;
		/*
		 * During the dev-replace procedure, the target drive can
		 * also be used to read data in case it is needed to repair
		 * a corrupt block elsewhere. This is possible if the
		 * requested area is left of the left cursor. In this area,
		 * the target drive is a full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it
				 * simple, only add the mirror with the
				 * lowest physical address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			u64 length = map->stripe_len;

			if (physical_of_found + length <=
			    dev_replace->cursor_left) {
				struct btrfs_bio_stripe *tgtdev_stripe =
					bbio->stripes + num_stripes;

				tgtdev_stripe->physical = physical_of_found;
				tgtdev_stripe->length =
					bbio->stripes[index_srcdev].length;
				tgtdev_stripe->dev = dev_replace->tgtdev;

				num_stripes++;
			}
		}
	}
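	/*
	 * Editor's note: the cursor_left check above is what makes this
	 * safe.  E.g. if the copy has progressed to byte offset 1 GiB on the
	 * source device, only stripes that end at or below that offset may
	 * be served from the target device, because only that region is
	 * guaranteed to be a complete copy already.
	 */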
4854 bbio->num_stripes = num_stripes;
4855 bbio->max_errors = max_errors;
4856 bbio->mirror_num = mirror_num;
	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
4863 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
4864 WARN_ON(num_stripes > 1);
4865 bbio->stripes[0].dev = dev_replace->tgtdev;
4866 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
4867 bbio->mirror_num = map->num_stripes + 1;
	if (raid_map) {
		sort_parity_stripes(bbio, raid_map);
		*raid_map_ret = raid_map;
	}
out:
	if (dev_replace_is_ongoing)
		btrfs_dev_replace_unlock(dev_replace);
	free_extent_map(em);
	return ret;
}
4880 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4881 u64 logical, u64 *length,
4882 struct btrfs_bio **bbio_ret, int mirror_num)
	return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
				 mirror_num, NULL);
}
4888 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4889 u64 chunk_start, u64 physical, u64 devid,
4890 u64 **logical, int *naddrs, int *stripe_len)
4892 struct extent_map_tree *em_tree = &map_tree->map_tree;
4893 struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;
4902 read_lock(&em_tree->lock);
4903 em = lookup_extent_mapping(em_tree, chunk_start, 1);
4904 read_unlock(&em_tree->lock);
	if (!em) {
		printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
		       chunk_start);
		return -EIO;
	}
	if (em->start != chunk_start) {
		printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
		       em->start, chunk_start);
		free_extent_map(em);
		return -EIO;
	}
4918 map = (struct map_lookup *)em->bdev;
	length = em->len;
	rmap_len = map->stripe_len;
4923 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4924 do_div(length, map->num_stripes / map->sub_stripes);
4925 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4926 do_div(length, map->num_stripes);
4927 else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4928 BTRFS_BLOCK_GROUP_RAID6)) {
4929 do_div(length, nr_data_stripes(map));
4930 rmap_len = map->stripe_len * nr_data_stripes(map);
4933 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
4934 BUG_ON(!buf); /* -ENOMEM */
4936 for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;
4943 stripe_nr = physical - map->stripes[i].physical;
4944 do_div(stripe_nr, map->stripe_len);
4946 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4947 stripe_nr = stripe_nr * map->num_stripes + i;
4948 do_div(stripe_nr, map->sub_stripes);
4949 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4950 stripe_nr = stripe_nr * map->num_stripes + i;
4951 } /* else if RAID[56], multiply by nr_data_stripes().
4952 * Alternatively, just use rmap_len below instead of
4953 * map->stripe_len */
4955 bytenr = chunk_start + stripe_nr * rmap_len;
4956 WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}
	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}
4975 static void btrfs_end_bio(struct bio *bio, int err)
4977 struct btrfs_bio *bbio = bio->bi_private;
4978 int is_orig_bio = 0;
	if (err) {
		atomic_inc(&bbio->error);
		if (err == -EIO || err == -EREMOTEIO) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio->bi_rw & WRITE)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
					btrfs_dev_stat_inc(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
				btrfs_dev_stat_print_on_error(dev);
			}
		}
	}
	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
5012 bio->bi_private = bbio->private;
5013 bio->bi_end_io = bbio->end_io;
5014 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(bbio);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
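/*
 * Editor's note, worked example: a RAID1 write fans out to two stripes
 * with max_errors = 1.  If exactly one of them fails, bbio->error ends up
 * at 1, which is not greater than max_errors, so the original bio still
 * completes successfully.
 */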
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
5050 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5051 struct btrfs_device *device,
5052 int rw, struct bio *bio)
5054 int should_queue = 1;
5055 struct btrfs_pending_bios *pending_bios;
	if (device->missing || !device->bdev) {
		bio_endio(bio, -EIO);
		return;
	}
5062 /* don't bother with additional async steps for reads, right now */
	if (!(rw & REQ_WRITE)) {
		bio_get(bio);
		btrfsic_submit_bio(rw, bio);
		bio_put(bio);
		return;
	}
	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
5076 atomic_inc(&root->fs_info->nr_async_bios);
5077 WARN_ON(bio->bi_next);
5078 bio->bi_next = NULL;
5081 spin_lock(&device->io_lock);
	if (bio->bi_rw & REQ_SYNC)
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;
5087 if (pending_bios->tail)
5088 pending_bios->tail->bi_next = bio;
5090 pending_bios->tail = bio;
5091 if (!pending_bios->head)
5092 pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
}
5103 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5106 struct bio_vec *prev;
5107 struct request_queue *q = bdev_get_queue(bdev);
5108 unsigned short max_sectors = queue_max_sectors(q);
	struct bvec_merge_data bvm = {
		.bi_bdev = bdev,
		.bi_sector = sector,
		.bi_rw = bio->bi_rw,
	};
	if (bio->bi_vcnt == 0) {
		WARN_ON(1);
		return 1;
	}

	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
	if (bio_sectors(bio) > max_sectors)
		return 0;

	if (!q->merge_bvec_fn)
		return 1;

	bvm.bi_size = bio->bi_size - prev->bv_len;
	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
		return 0;
	return 1;
}
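/*
 * Editor's note, illustrative only: for a bio already holding 256 sectors
 * on a queue where queue_max_sectors() returns 128, bio_size_ok() returns
 * 0 and btrfs_map_bio() below falls back to breakup_stripe_bio(), which
 * re-adds the pages one by one into smaller bios.
 */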
5133 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5134 struct bio *bio, u64 physical, int dev_nr,
5137 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5139 bio->bi_private = bbio;
5140 btrfs_io_bio(bio)->stripe_index = dev_nr;
5141 bio->bi_end_io = btrfs_end_bio;
5142 bio->bi_sector = physical >> 9;
#ifdef DEBUG
	{
		struct rcu_string *name;

		rcu_read_lock();
		name = rcu_dereference(dev->name);
		pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
			 "(%s id %llu), size=%u\n", rw,
			 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
			 name->str, dev->devid, bio->bi_size);
		rcu_read_unlock();
	}
#endif
	bio->bi_bdev = dev->bdev;
	if (async)
		btrfs_schedule_bio(root, dev, rw, bio);
	else
		btrfsic_submit_bio(rw, bio);
}
5163 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5164 struct bio *first_bio, struct btrfs_device *dev,
5165 int dev_nr, int rw, int async)
	struct bio_vec *bvec = first_bio->bi_io_vec;
	struct bio *bio;
	int nr_vecs = bio_get_nr_vecs(dev->bdev);
5170 u64 physical = bbio->stripes[dev_nr].physical;
again:
	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
	if (!bio)
		return -ENOMEM;
	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
				 bvec->bv_offset) < bvec->bv_len) {
			u64 len = bio->bi_size;

			atomic_inc(&bbio->stripes_pending);
			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
					  rw, async);
			physical += len;
			goto again;
		}
		bvec++;
	}

	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
	return 0;
}
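/*
 * Editor's note: bio_add_page() returns how many bytes it accepted, so a
 * short return above means the current bio cannot take the page; the full
 * bio is submitted as-is and a fresh one is started at the advanced
 * physical offset.
 */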
5195 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5197 atomic_inc(&bbio->error);
5198 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5199 bio->bi_private = bbio->private;
5200 bio->bi_end_io = bbio->end_io;
5201 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_sector = logical >> 9;
		kfree(bbio);
		bio_endio(bio, -EIO);
	}
}
5208 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5209 int mirror_num, int async_submit)
5211 struct btrfs_device *dev;
5212 struct bio *first_bio = bio;
5213 u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	u64 *raid_map = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;
	struct btrfs_bio *bbio = NULL;
5222 length = bio->bi_size;
5223 map_length = length;
5225 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5226 mirror_num, &raid_map);
	if (ret) /* -ENOMEM */
		return ret;
5230 total_devs = bbio->num_stripes;
5231 bbio->orig_bio = first_bio;
5232 bbio->private = first_bio->bi_private;
5233 bbio->end_io = first_bio->bi_end_io;
5234 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
	if (raid_map) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (rw & WRITE) {
			return raid56_parity_write(root, bio, bbio,
						   raid_map, map_length);
		} else {
			return raid56_parity_recover(root, bio, bbio,
						     raid_map, map_length,
						     mirror_num);
		}
	}
5249 if (map_length < length) {
5250 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5251 (unsigned long long)logical,
5252 (unsigned long long)length,
			  (unsigned long long)map_length);
		BUG();
	}
5257 while (dev_nr < total_devs) {
5258 dev = bbio->stripes[dev_nr].dev;
5259 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
			bbio_error(bbio, first_bio, logical);
			dev_nr++;
			continue;
		}
		/*
		 * Check and see if we're ok with this bio based on its size
		 * and offset with the given device.
		 */
5269 if (!bio_size_ok(dev->bdev, first_bio,
5270 bbio->stripes[dev_nr].physical >> 9)) {
			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
						 dev_nr, rw, async_submit);
			BUG_ON(ret);
			dev_nr++;
			continue;
		}
		if (dev_nr < total_devs - 1) {
			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
			BUG_ON(!bio); /* -ENOMEM */
		} else {
			bio = first_bio;
		}

		submit_stripe_bio(root, bbio, bio,
				  bbio->stripes[dev_nr].physical, dev_nr, rw,
				  async_submit);
		dev_nr++;
	}
	return 0;
}
5293 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5296 struct btrfs_device *device;
5297 struct btrfs_fs_devices *cur_devices;
5299 cur_devices = fs_info->fs_devices;
5300 while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
5313 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5314 u64 devid, u8 *dev_uuid)
5316 struct btrfs_device *device;
5317 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
5322 list_add(&device->dev_list,
5323 &fs_devices->devices);
5324 device->devid = devid;
5325 device->work.func = pending_bios_fn;
5326 device->fs_devices = fs_devices;
5327 device->missing = 1;
5328 fs_devices->num_devices++;
5329 fs_devices->missing_devices++;
5330 spin_lock_init(&device->io_lock);
5331 INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);

	return device;
}
5336 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
5337 struct extent_buffer *leaf,
5338 struct btrfs_chunk *chunk)
5340 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5341 struct map_lookup *map;
5342 struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;
5351 logical = key->offset;
5352 length = btrfs_chunk_length(leaf, chunk);
5354 read_lock(&map_tree->map_tree.lock);
5355 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
5356 read_unlock(&map_tree->map_tree.lock);
5358 /* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
5369 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}
5376 em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
5381 em->block_len = em->len;
5383 map->num_stripes = num_stripes;
5384 map->io_width = btrfs_chunk_io_width(leaf, chunk);
5385 map->io_align = btrfs_chunk_io_align(leaf, chunk);
5386 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
5387 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
5388 map->type = btrfs_chunk_type(leaf, chunk);
5389 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
5390 for (i = 0; i < num_stripes; i++) {
5391 map->stripes[i].physical =
5392 btrfs_stripe_offset_nr(leaf, chunk, i);
5393 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
							uuid, NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
5404 if (!map->stripes[i].dev) {
5405 map->stripes[i].dev =
5406 add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}
5416 write_lock(&map_tree->map_tree.lock);
5417 ret = add_extent_mapping(&map_tree->map_tree, em, 0);
5418 write_unlock(&map_tree->map_tree.lock);
5419 BUG_ON(ret); /* Tree corruption */
	free_extent_map(em);

	return 0;
}
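/*
 * Editor's note: once the extent_map is cached, a logical address inside
 * [logical, logical + length) resolves through this map_lookup without
 * touching the chunk tree again; __btrfs_map_block() only does the stripe
 * arithmetic on top of it.
 */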
5425 static void fill_device_from_item(struct extent_buffer *leaf,
5426 struct btrfs_dev_item *dev_item,
5427 struct btrfs_device *device)
5431 device->devid = btrfs_device_id(leaf, dev_item);
5432 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
5433 device->total_bytes = device->disk_total_bytes;
5434 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
5435 device->type = btrfs_device_type(leaf, dev_item);
5436 device->io_align = btrfs_device_io_align(leaf, dev_item);
5437 device->io_width = btrfs_device_io_width(leaf, dev_item);
5438 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
5439 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
5440 device->is_tgtdev_for_dev_replace = 0;
5442 ptr = (unsigned long)btrfs_device_uuid(dev_item);
5443 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
5446 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
	struct btrfs_fs_devices *fs_devices;
	int ret;
5451 BUG_ON(!mutex_is_locked(&uuid_mutex));
5453 fs_devices = root->fs_info->fs_devices->seed;
5454 while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}
	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}
5468 fs_devices = clone_fs_devices(fs_devices);
5469 if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}
5474 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
5475 root->fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		goto out;
	}
5481 if (!fs_devices->seeding) {
5482 __btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}
5488 fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	return ret;
}
5494 static int read_one_dev(struct btrfs_root *root,
5495 struct extent_buffer *leaf,
5496 struct btrfs_dev_item *dev_item)
	struct btrfs_device *device;
	u64 devid;
	int ret;
5501 u8 fs_uuid[BTRFS_UUID_SIZE];
5502 u8 dev_uuid[BTRFS_UUID_SIZE];
5504 devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);
5512 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
5513 ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}
5518 device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
5519 if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			btrfs_warn(root->fs_info, "devid %llu missing",
				   (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
5529 } else if (!device->missing) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
5536 root->fs_info->fs_devices->missing_devices++;
			device->missing = 1;
		}
	}
5541 if (device->fs_devices != root->fs_info->fs_devices) {
5542 BUG_ON(device->writeable);
5543 if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}
5548 fill_device_from_item(leaf, dev_item, device);
5549 device->in_fs_metadata = 1;
5550 if (device->writeable && !device->is_tgtdev_for_dev_replace) {
5551 device->fs_devices->total_rw_bytes += device->total_bytes;
5552 spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += device->total_bytes -
			device->bytes_used;
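		/*
		 * Editor's note, worked example: a writeable 100 GiB device
		 * with 30 GiB already allocated to chunks contributes the
		 * remaining 70 GiB to free_chunk_space here.
		 */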
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = 0;
	return ret;
}
5561 int btrfs_read_sys_array(struct btrfs_root *root)
5563 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
5564 struct extent_buffer *sb;
5565 struct btrfs_disk_key *disk_key;
5566 struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
5574 struct btrfs_key key;
5576 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
5580 btrfs_set_buffer_uptodate(sb);
5581 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the
	 * system array.  The btrfs_set_buffer_uptodate() call does not
	 * properly mark all of its pages up-to-date when the page is
	 * larger: the extent does not cover the whole page and
	 * consequently check_page_uptodate does not find all the page's
	 * extents up-to-date (the hole beyond sb), and
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through the
	 * mark_extent_buffer_dirty/writeback cycle, but sb spans only this
	 * function.  Add an explicit SetPageUptodate call to silence the
	 * warning, e.g. on PowerPC 64.
	 */
5594 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
5595 SetPageUptodate(sb->pages[0]);
5597 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
5598 array_size = btrfs_super_sys_array_size(super_copy);
5600 ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;
5604 while (cur < array_size) {
5605 disk_key = (struct btrfs_disk_key *)ptr;
5606 btrfs_disk_key_to_cpu(&key, disk_key);
		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;
5612 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
5613 chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
5617 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
5631 int btrfs_read_chunk_tree(struct btrfs_root *root)
5633 struct btrfs_path *path;
5634 struct extent_buffer *leaf;
5635 struct btrfs_key key;
5636 struct btrfs_key found_key;
5640 root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	mutex_lock(&uuid_mutex);
	lock_chunks(root);
5649 /* first we search for all of the device items, and then we
5650 * read in all of the chunk items. This way we can create chunk
	 * mappings that reference all of the devices that are found.
	 */
again:
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
5661 leaf = path->nodes[0];
5662 slot = path->slots[0];
5663 if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
5671 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5672 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
5675 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
5676 struct btrfs_dev_item *dev_item;
5677 dev_item = btrfs_item_ptr(leaf, slot,
5678 struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
5683 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
5684 struct btrfs_chunk *chunk;
5685 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = BTRFS_CHUNK_ITEMS_OBJECTID;
		btrfs_release_path(path);
		goto again;
	}
	ret = 0;
error:
5699 unlock_chunks(root);
5700 mutex_unlock(&uuid_mutex);
	btrfs_free_path(path);
	return ret;
}
5706 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
5708 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
5709 struct btrfs_device *device;
5711 mutex_lock(&fs_devices->device_list_mutex);
5712 list_for_each_entry(device, &fs_devices->devices, dev_list)
5713 device->dev_root = fs_info->dev_root;
5714 mutex_unlock(&fs_devices->device_list_mutex);
5717 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5722 btrfs_dev_stat_reset(dev, i);
5725 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
5727 struct btrfs_key key;
5728 struct btrfs_key found_key;
5729 struct btrfs_root *dev_root = fs_info->dev_root;
5730 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;
	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
5744 mutex_lock(&fs_devices->device_list_mutex);
5745 list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = 0;
		key.type = BTRFS_DEV_STATS_KEY;
5751 key.offset = device->devid;
5752 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
5759 slot = path->slots[0];
5760 eb = path->nodes[0];
5761 btrfs_item_key_to_cpu(eb, &found_key, slot);
5762 item_size = btrfs_item_size_nr(eb, slot);
5764 ptr = btrfs_item_ptr(eb, slot,
5765 struct btrfs_dev_stats_item);
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}
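		/*
		 * Editor's note: item_size gates the read so that a
		 * dev_stats item written by an older kernel that knew, say,
		 * only 3 of the 5 counters still loads cleanly; the counters
		 * it lacks are simply reset to zero.
		 */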
5775 device->dev_stats_valid = 1;
5776 btrfs_dev_stat_print_on_load(device);
5777 btrfs_release_path(path);
5779 mutex_unlock(&fs_devices->device_list_mutex);
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
5786 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
5787 struct btrfs_root *dev_root,
5788 struct btrfs_device *device)
5790 struct btrfs_path *path;
5791 struct btrfs_key key;
5792 struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;
	key.objectid = 0;
	key.type = BTRFS_DEV_STATS_KEY;
5799 key.offset = device->devid;
	path = btrfs_alloc_path();
	BUG_ON(!path);
5803 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
5812 /* need to delete old one and insert a new one */
5813 ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}
	if (ret == 1) {
		/* need to insert a new item */
5824 btrfs_release_path(path);
5825 ret = btrfs_insert_empty_item(trans, dev_root, path,
5826 &key, sizeof(*ptr));
		if (ret < 0) {
			printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
	}
5834 eb = path->nodes[0];
5835 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
5836 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5837 btrfs_set_dev_stats_value(eb, ptr, i,
5838 btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * called from commit_transaction. Writes all changed device stats to disk.
 */
5849 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
5850 struct btrfs_fs_info *fs_info)
5852 struct btrfs_root *dev_root = fs_info->dev_root;
5853 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;
5857 mutex_lock(&fs_devices->device_list_mutex);
5858 list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->dev_stats_valid || !device->dev_stats_dirty)
			continue;
5862 ret = update_dev_stat_item(trans, dev_root, device);
		if (!ret)
			device->dev_stats_dirty = 0;
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}
5871 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
5873 btrfs_dev_stat_inc(dev, index);
5874 btrfs_dev_stat_print_on_error(dev);
5877 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
	if (!dev->dev_stats_valid)
		return;
5881 printk_ratelimited_in_rcu(KERN_ERR
5882 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5883 rcu_str_deref(dev->name),
5884 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5885 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5886 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5887 btrfs_dev_stat_read(dev,
5888 BTRFS_DEV_STAT_CORRUPTION_ERRS),
5889 btrfs_dev_stat_read(dev,
5890 BTRFS_DEV_STAT_GENERATION_ERRS));
5893 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
5900 if (i == BTRFS_DEV_STAT_VALUES_MAX)
5901 return; /* all values == 0, suppress message */
5903 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
5904 rcu_str_deref(dev->name),
5905 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
5906 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
5907 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
5908 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
5909 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
5912 int btrfs_get_dev_stats(struct btrfs_root *root,
5913 struct btrfs_ioctl_get_dev_stats *stats)
5915 struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	int i;
5919 mutex_lock(&fs_devices->device_list_mutex);
5920 dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
5921 mutex_unlock(&fs_devices->device_list_mutex);
5925 "btrfs: get dev_stats failed, device not found\n");
5927 } else if (!dev->dev_stats_valid) {
5929 "btrfs: get dev_stats failed, not yet valid\n");
5931 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
5932 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
5940 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
5941 if (stats->nr_items > i)
5942 stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
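/*
 * Editor's note: with BTRFS_DEV_STATS_RESET set, the ioctl above hands the
 * old counter values back to user space and zeroes them value-by-value via
 * btrfs_dev_stat_read_and_reset(); a plain read leaves the counters
 * untouched.
 */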
5949 int btrfs_scratch_superblock(struct btrfs_device *device)
5951 struct buffer_head *bh;
5952 struct btrfs_super_block *disk_super;
	bh = btrfs_read_dev_super(device->bdev);
	if (!bh)
		return -EINVAL;
5957 disk_super = (struct btrfs_super_block *)bh->b_data;
5959 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
5960 set_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	return 0;
}