/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>

#include "extent_map.h"

#include "transaction.h"
#include "print-tree.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
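/*
 * map_lookup is the in-memory description of one chunk: the block group
 * type and stripe geometry, plus one btrfs_bio_stripe per device extent
 * backing it.  It is stashed in the extent_map's bdev pointer, and
 * map_lookup_size() accounts for the flexible stripes[] tail.
 */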
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
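/*
 * all access to the fs_uuids list above, and to the device lists hanging
 * off of it, is serialized by uuid_mutex.
 */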
void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while (!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
				fs_devices->open_devices--;
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}
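/*
 * find a device by devid in the given list; when uuid is non-NULL the
 * device uuid must match as well.
 */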
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
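/*
 * called for each device found while scanning super blocks: add the
 * device to the btrfs_fs_devices entry for its fsid, creating the entry
 * on first sight, and track the highest generation seen so the latest
 * super block copy wins.
 */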
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
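/*
 * after the metadata has been read, close and drop any scanned device
 * that the filesystem does not actually reference.
 */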
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
again:
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->in_fs_metadata) {
			if (device->bdev) {
				close_bdev_excl(device->bdev);
				fs_devices->open_devices--;
			}
			list_del(&device->dev_list);
			list_del(&device->dev_alloc_list);
			fs_devices->num_devices--;
			kfree(device->name);
			kfree(device);
			goto again;
		}
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
			fs_devices->open_devices--;
		}
		device->bdev = NULL;
		device->in_fs_metadata = 0;
	}
	fs_devices->mounted = 0;
	mutex_unlock(&uuid_mutex);
	return 0;
}
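/*
 * open every scanned device for this filesystem, validate the super
 * block on each one, and remember the device holding the highest
 * generation as the source of the latest super block copy.
 */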
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	u64 transid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	if (fs_devices->mounted)
		goto out;

	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_excl(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic)))
			goto error_brelse;

		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		transid = btrfs_super_generation(disk_super);
		if (transid > latest_transid) {
			latest_devid = devid;
			latest_transid = transid;
			latest_bdev = bdev;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		fs_devices->open_devices++;
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_excl(bdev);
error:
		continue;
	}

	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->mounted = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
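/*
 * probe one device for a btrfs super block and, if it has one, register
 * it under its fsid via device_list_add.
 */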
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);
	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);

	/* FIXME, make a real uuid parser */
	printk("device fsid %llx-%llx ",
	       *(unsigned long long *)disk_super->fsid,
	       *(unsigned long long *)(disk_super->fsid + 8));

	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found = 0;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
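/*
 * remove the dev extent item on @device covering @start and credit the
 * freed bytes back to the device's bytes_used.
 */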
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);

	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		BUG();
	}

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
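/*
 * carve a free region out of @device, recording it as a dev extent item
 * with a back reference to the chunk that owns it.
 */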
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret) {
		goto err;
	}

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}

static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
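/*
 * walk backwards over the dev items to find the highest devid in use;
 * the next free devid is returned in *objectid.
 */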
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
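/*
 * delete the dev item for a device that has been emptied, shrink the
 * super block totals accordingly, and repoint latest_bdev if it was
 * the device being removed.
 */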
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct block_device *bdev = device->bdev;
	struct btrfs_device *next_dev;
	struct btrfs_key key;
	u64 total_bytes;
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	list_del_init(&device->dev_list);
	list_del_init(&device->dev_alloc_list);
	fs_devices = root->fs_info->fs_devices;

	next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
			      dev_list);
	if (bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_dev->bdev;
	if (bdev == fs_devices->latest_bdev)
		fs_devices->latest_bdev = next_dev->bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes - device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes - 1);

	btrfs_free_path(path);
	btrfs_commit_transaction(trans, root);
	return ret;
}
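/*
 * remove a device from a mounted filesystem.  The RAID profile minimums
 * are enforced first (two devices for raid1, four for raid10); the
 * special path "missing" picks an in-metadata device with no bdev, and
 * the super block magic is wiped so the device is no longer detected
 * as part of the filesystem.
 */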
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	int ret;

	mutex_lock(&root->fs_info->fs_mutex);
	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
		printk("btrfs: unable to go below four devices on raid10\n");

	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
		printk("btrfs: unable to go below two devices on raid1\n");

	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *cur;
		struct list_head *devices;
		struct btrfs_device *tmp;

		devices = &root->fs_info->fs_devices->devices;
		list_for_each(cur, devices) {
			tmp = list_entry(cur, struct btrfs_device, dev_list);
			if (tmp->in_fs_metadata && !tmp->bdev) {

			}
		}
		if (!device) {
			printk("btrfs: no missing devices found to remove\n");

		}
	} else {
		bdev = open_bdev_excl(device_path, 0,
				      root->fs_info->bdev_holder);

		bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);

		disk_super = (struct btrfs_super_block *)bh->b_data;
		if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
			    sizeof(disk_super->magic))) {

		}
		if (memcmp(disk_super->fsid, root->fs_info->fsid,
			   BTRFS_FSID_SIZE)) {

		}
		devid = le64_to_cpu(disk_super->dev_item.devid);
		device = btrfs_find_device(root, devid, NULL);
	}

	root->fs_info->fs_devices->num_devices--;

	ret = btrfs_shrink_device(device, 0);

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);

	if (bh) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	if (device->bdev) {
		/* one close for the device struct or super_block */
		close_bdev_excl(device->bdev);
		root->fs_info->fs_devices->open_devices--;
	}
	if (bdev) {
		/* one close for us */
		close_bdev_excl(bdev);
	}

		close_bdev_excl(bdev);

	mutex_unlock(&uuid_mutex);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}
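/*
 * add a freshly opened device to a mounted filesystem: create its dev
 * item, grow the super block totals, and hook it into the device and
 * allocation lists.
 */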
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	int ret;
	u64 total_bytes;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);

	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out;
	}

	device->barriers = 1;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);

	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;

	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	return 0;

out:
	close_bdev_excl(bdev);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;
}
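/*
 * write the current in-memory state of @device back into its dev item
 * in the chunk tree.
 */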
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);
	return ret;
}
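/*
 * system chunks are duplicated inside the super block's sys_chunk_array;
 * when one is freed the matching array entry is found and the rest of
 * the array is shifted down over it.
 */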
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
			u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur = 0;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		}

		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
			break;
		}
		ptr += len;
		cur += len;
	}
	return ret;
}

int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk("btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}
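/*
 * scale num to factor tenths of its value; the balance and chunk-size
 * code below uses div_factor(x, 1) to mean 10% of x.
 */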
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *cur;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	dev_root = dev_root->fs_info->dev_root;

	mutex_lock(&dev_root->fs_info->fs_mutex);
	/* step one make some room on all the devices */
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
		btrfs_release_path(chunk_root, path);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->fs_mutex);
	return ret;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			break;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}
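/*
 * append a chunk item (preceded by its key) to the super block's
 * sys_chunk_array, so system chunks can be mapped before the chunk
 * tree itself is readable.
 */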
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
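/*
 * e.g. with a 1GB calc_size: RAID0 across four stripes maps 4GB of
 * logical space, RAID1/DUP map 1GB (the second stripe is a copy), and
 * RAID10 across four stripes with two sub_stripes maps 2GB.
 */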
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 min_free;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 percent_max;
	u64 max_avail = 0;
	u64 avail;
	u64 physical;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int index = 0;
	int ret;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {

		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free, &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	while (index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);

		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
			     chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
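/*
 * report how many copies exist of the data at @logical: num_stripes
 * for RAID1/DUP, sub_stripes for RAID10, and one for everything else.
 */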
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
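/*
 * choose a mirror for reading: the optimal stripe if its device is
 * present, otherwise the first stripe in the group whose device still
 * has a bdev.
 */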
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}
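/*
 * map a logical range onto its physical stripes, trimming *length so a
 * bio never crosses a stripe boundary.  Writes get a btrfs_multi_bio
 * covering every mirror; reads pick a single live mirror.  When
 * unplug_page is set, the same walk is used to unplug each backing
 * device queue instead.
 */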
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index = 0;
	int i;
	int num_stripes = 1;
	int max_errors = 0;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
		atomic_set(&multi->error, 0);
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu len %Lu\n", logical, *length);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
			max_errors = 1;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
			max_errors = 1;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_offset;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}
	if (!multi_ret && !unplug_page)
		goto out;

	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					map->num_stripes,
					current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					map->sub_stripes, stripe_index +
					current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;

			bdi = blk_get_backing_dev_info(device->bdev);
			if (bdi->unplug_io_fn) {
				bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}

int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		atomic_inc(&multi->error);

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
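/*
 * map a bio and submit one clone of it per stripe; the original bio is
 * used for the final stripe, and with multiple devices the completions
 * are funneled through end_bio_multi_stripe.
 */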
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			spin_lock(&dev->io_lock);

			spin_unlock(&dev->io_lock);
			submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
			bio_endio(bio, bio->bi_size, -EIO);
#else
			bio_endio(bio, -EIO);
#endif
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);

	return 0;
}

struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
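/*
 * create a stub btrfs_device for a devid referenced by metadata but not
 * present at scan time, so a degraded mount can still build its chunk
 * mappings.
 */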
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	list_add(&device->dev_list,
		 &fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &fs_devices->alloc_list);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
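/*
 * turn one on-disk chunk item into a map_lookup and insert it into the
 * mapping tree; stripes on devices that were never scanned are only
 * tolerated in degraded mode, via add_missing_dev stubs.
 */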
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	map = kzalloc(sizeof(*map), GFP_NOFS);
	if (!map)
		return -ENOMEM;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);

		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
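/*
 * update the in-memory device that matches a dev item read from disk,
 * creating a missing-device stub if it was never scanned.
 */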
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu missing\n", devid);
		device = add_missing_dev(root, devid, dev_uuid);
		if (!device)
			return -ENOMEM;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;

	ret = btrfs_open_device(device);

	return ret;
}

int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
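/*
 * the super block embeds a copy of every system chunk item in
 * sys_chunk_array; walk the array and feed each chunk to read_one_chunk
 * so system chunks are mapped before the chunk tree is read.
 */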
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur = 0;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
		btrfs_release_path(root, path);
		goto again;
	}

	btrfs_free_path(path);