fs/btrfs/volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}
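
/*
 * Note: lock_chunks()/unlock_chunks() above are thin wrappers around
 * fs_info->chunk_mutex, which serializes chunk allocation and device
 * state changes.
 */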

static struct btrfs_fs_devices *__alloc_fs_devices(void)
{
        struct btrfs_fs_devices *fs_devs;

        fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
        if (!fs_devs)
                return ERR_PTR(-ENOMEM);

        mutex_init(&fs_devs->device_list_mutex);

        INIT_LIST_HEAD(&fs_devs->devices);
        INIT_LIST_HEAD(&fs_devs->alloc_list);
        INIT_LIST_HEAD(&fs_devs->list);

        return fs_devs;
}

/**
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:       a pointer to UUID for this FS.  If NULL a new UUID is
 *              generated.
 *
 * Return: a pointer to a new &struct btrfs_fs_devices on success;
 * ERR_PTR() on error.  Returned struct is not linked onto any lists and
 * can be destroyed with kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
        struct btrfs_fs_devices *fs_devs;

        fs_devs = __alloc_fs_devices();
        if (IS_ERR(fs_devs))
                return fs_devs;

        if (fsid)
                memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
        else
                generate_random_uuid(fs_devs->fsid);

        return fs_devs;
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                rcu_string_free(device->name);
                kfree(device);
        }
        kfree(fs_devices);
}

static void btrfs_kobject_uevent(struct block_device *bdev,
                                 enum kobject_action action)
{
        int ret;

        ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
        if (ret)
                pr_warn("Sending event '%d' to kobject: '%s' (%p): failed\n",
                        action,
                        kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
                        &disk_to_dev(bdev->bd_disk)->kobj);
}

void btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, list);
                list_del(&fs_devices->list);
                free_fs_devices(fs_devices);
        }
}

static struct btrfs_device *__alloc_device(void)
{
        struct btrfs_device *dev;

        dev = kzalloc(sizeof(*dev), GFP_NOFS);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_alloc_list);

        spin_lock_init(&dev->io_lock);

        spin_lock_init(&dev->reada_lock);
        atomic_set(&dev->reada_in_flight, 0);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);

        return dev;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;

        list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct buffer_head **bh)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                printk(KERN_INFO "btrfs: open %s failed\n", device_path);
                goto error;
        }

        if (flush)
                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
        ret = set_blocksize(*bdev, 4096);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *bh = btrfs_read_dev_super(*bdev);
        if (!*bh) {
                ret = -EINVAL;
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        *bh = NULL;
        return ret;
}

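/*
 * Splice the unprocessed portion of a bio list (head..tail) back onto the
 * front of @pending_bios so ordering is preserved when the worker backs
 * off under congestion.
 */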
static void requeue_list(struct btrfs_pending_bios *pending_bios,
                        struct bio *head, struct bio *tail)
{

        struct bio *old_head;

        old_head = pending_bios->head;
        pending_bios->head = head;
        if (pending_bios->tail)
                tail->bi_next = old_head;
        else
                pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run;
        unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
        int force_reg = 0;
        int sync_pending = 0;
        struct blk_plug plug;

        /*
         * this function runs all the bios we've collected for
         * a particular device.  We don't want to wander off to
         * another device without first sending all of these down.
         * So, set up a plug here and finish it off before we return.
         */
        blk_start_plug(&plug);

        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

loop_lock:
        num_run = 0;

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
                force_reg = 1;
        } else {
                pending_bios = &device->pending_bios;
                force_reg = 0;
        }

        pending = pending_bios->head;
        tail = pending_bios->tail;
        WARN_ON(pending && !tail);

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (device->pending_sync_bios.head == NULL &&
            device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
        } else {
                again = 1;
                device->running_pending = 1;
        }

        pending_bios->head = NULL;
        pending_bios->tail = NULL;

        spin_unlock(&device->io_lock);

        while (pending) {

                rmb();
                /* we want to work on both lists, but do more bios on the
                 * sync list than the regular list
                 */
                if ((num_run > 32 &&
                    pending_bios != &device->pending_sync_bios &&
                    device->pending_sync_bios.head) ||
                   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
                    device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
                }

                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;

                if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);

                /*
                 * if we're doing the sync list, record that our
                 * plug has some sync requests on it
                 *
                 * If we're doing the regular list and there are
                 * sync requests sitting around, unplug before
                 * we add more
                 */
                if (pending_bios == &device->pending_sync_bios) {
                        sync_pending = 1;
                } else if (sync_pending) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }

                btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
                        cond_resched();

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;

                        ioc = current->io_context;

                        /*
                         * the main goal here is that we don't want to
                         * block if we're going to be able to submit
                         * more requests without blocking.
                         *
                         * This code does two great things, it pokes into
                         * the elevator code from a filesystem _and_
                         * it makes assumptions about how batching works.
                         */
                        if (ioc && ioc->nr_batch_requests > 0 &&
                            time_before(jiffies, ioc->last_waited + HZ/50UL) &&
                            (last_waited == 0 ||
                             ioc->last_waited == last_waited)) {
                                /*
                                 * we want to go through our batch of
                                 * requests and stop.  So, we copy out
                                 * the ioc->last_waited time and test
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
                                if (need_resched())
                                        cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        device->running_pending = 1;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
                /* unplug every 64 requests just for good measure */
                if (batch_run % 64 == 0) {
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                        sync_pending = 0;
                }
        }

        cond_resched();
        if (again)
                goto loop;

        spin_lock(&device->io_lock);
        if (device->pending_bios.head || device->pending_sync_bios.head)
                goto loop_lock;
        spin_unlock(&device->io_lock);

done:
        blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

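/*
 * Register a scanned device in the in-memory list for its fsid, creating
 * the btrfs_fs_devices entry if this is the first device seen for that
 * fsid.  Called with uuid_mutex held (see btrfs_scan_one_device()).
 */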
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = alloc_fs_devices(disk_super->fsid);
                if (IS_ERR(fs_devices))
                        return PTR_ERR(fs_devices);

                list_add(&fs_devices->list, &fs_uuids);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;

                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                if (fs_devices->opened)
                        return -EBUSY;

                device = btrfs_alloc_device(NULL, &devid,
                                            disk_super->dev_item.uuid);
                if (IS_ERR(device)) {
                        /* we can safely leave the fs_devices entry around */
                        return PTR_ERR(device);
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        return -ENOMEM;
                }
                rcu_assign_pointer(device->name, name);

                mutex_lock(&fs_devices->device_list_mutex);
                list_add_rcu(&device->dev_list, &fs_devices->devices);
                fs_devices->num_devices++;
                mutex_unlock(&fs_devices->device_list_mutex);

                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name->str, path)) {
                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name)
                        return -ENOMEM;
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (device->missing) {
                        fs_devices->missing_devices--;
                        device->missing = 0;
                }
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}

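/*
 * Duplicate @orig's device list into a fresh btrfs_fs_devices.  Only the
 * in-memory bookkeeping (devid, uuid, name) is copied; no block devices
 * are opened here.
 */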
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;

        fs_devices = alloc_fs_devices(orig->fsid);
        if (IS_ERR(fs_devices))
                return fs_devices;

        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        fs_devices->total_devices = orig->total_devices;

        /* We hold the volume lock, so it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = btrfs_alloc_device(NULL, &orig_dev->devid,
                                            orig_dev->uuid);
                if (IS_ERR(device))
                        goto error;

                /*
                 * This is OK to do without the RCU read lock held because
                 * we hold the uuid_mutex, so nothing we touch in here is
                 * going to disappear.
                 */
                name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
                if (!name) {
                        kfree(device);
                        goto error;
                }
                rcu_assign_pointer(device->name, name);

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
}

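/*
 * Release devices that are not part of the filesystem metadata (stale
 * scan entries) and remember the device with the highest generation as
 * latest_bdev.  Whether a dev-replace target device is kept depends on
 * @step, as explained in the body below.
 */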
void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
                               struct btrfs_fs_devices *fs_devices, int step)
{
        struct btrfs_device *device, *next;

        struct block_device *latest_bdev = NULL;
        u64 latest_devid = 0;
        u64 latest_transid = 0;

        mutex_lock(&uuid_mutex);
again:
        /* This is the initialized path; it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata) {
                        if (!device->is_tgtdev_for_dev_replace &&
                            (!latest_transid ||
                             device->generation > latest_transid)) {
                                latest_devid = device->devid;
                                latest_transid = device->generation;
                                latest_bdev = device->bdev;
                        }
                        continue;
                }

                if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
                        /*
                         * In the first step, keep the device which has
                         * the correct fsid and the devid that is used
                         * for the dev_replace procedure.
                         * In the second step, the dev_replace state is
                         * read from the device tree and it is known
                         * whether the procedure is really active or
                         * not, which means whether this device is
                         * used or whether it should be removed.
                         */
                        if (step == 0 || device->is_tgtdev_for_dev_replace) {
                                continue;
                        }
                }
                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        device->writeable = 0;
                        if (!device->is_tgtdev_for_dev_replace)
                                fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                rcu_string_free(device->name);
                kfree(device);
        }

        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
                goto again;
        }

        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;

        mutex_unlock(&uuid_mutex);
}

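/*
 * Device freeing is deferred in two stages: free_device() runs as an RCU
 * callback, where blocking is not allowed, so it only schedules
 * __free_device() on a workqueue, which may then sleep in blkdev_put().
 */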
static void __free_device(struct work_struct *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, rcu_work);

        if (device->bdev)
                blkdev_put(device->bdev, device->mode);

        rcu_string_free(device->name);
        kfree(device);
}

static void free_device(struct rcu_head *head)
{
        struct btrfs_device *device;

        device = container_of(head, struct btrfs_device, rcu);

        INIT_WORK(&device->rcu_work, __free_device);
        schedule_work(&device->rcu_work);
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        if (--fs_devices->opened > 0)
                return 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                struct btrfs_device *new_device;
                struct rcu_string *name;

                if (device->bdev)
                        fs_devices->open_devices--;

                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }

                if (device->can_discard)
                        fs_devices->num_can_discard--;

                new_device = btrfs_alloc_device(NULL, &device->devid,
                                                device->uuid);
                BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

                /* Safe because we are under uuid_mutex */
                if (device->name) {
                        name = rcu_string_strdup(device->name->str, GFP_NOFS);
                        BUG_ON(!name); /* -ENOMEM */
                        rcu_assign_pointer(new_device->name, name);
                }

                list_replace_rcu(&device->dev_list, &new_device->dev_list);
                new_device->fs_devices = device->fs_devices;

                call_rcu(&device->rcu, free_device);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = 0;

        return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_fs_devices *seed_devices = NULL;
        int ret;

        mutex_lock(&uuid_mutex);
        ret = __btrfs_close_devices(fs_devices);
        if (!fs_devices->opened) {
                seed_devices = fs_devices->seed;
                fs_devices->seed = NULL;
        }
        mutex_unlock(&uuid_mutex);

        while (seed_devices) {
                fs_devices = seed_devices;
                seed_devices = fs_devices->seed;
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
        }
        /*
         * Wait for the rcu kworkers under __btrfs_close_devices
         * to finish all blkdev_puts so the device is really
         * free when umount is done.
         */
        rcu_barrier();
        return ret;
}

static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 devid;
        int seeding = 1;
        int ret = 0;

        flags |= FMODE_EXCL;

        list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
                        continue;

                /* Just open everything we can; ignore failures here */
                if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                            &bdev, &bh))
                        continue;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;

                if (memcmp(device->uuid, disk_super->dev_item.uuid,
                           BTRFS_UUID_SIZE))
                        goto error_brelse;

                device->generation = btrfs_super_generation(disk_super);
                if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
                        latest_transid = device->generation;
                        latest_bdev = bdev;
                }

                if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                        device->writeable = 0;
                } else {
                        device->writeable = !bdev_read_only(bdev);
                        seeding = 0;
                }

                q = bdev_get_queue(bdev);
                if (blk_queue_discard(q)) {
                        device->can_discard = 1;
                        fs_devices->num_can_discard++;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                device->mode = flags;

                if (!blk_queue_nonrot(bdev_get_queue(bdev)))
                        fs_devices->rotating = 1;

                fs_devices->open_devices++;
                if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                        fs_devices->rw_devices++;
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
                brelse(bh);
                continue;

error_brelse:
                brelse(bh);
                blkdev_put(bdev, flags);
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EINVAL;
                goto out;
        }
        fs_devices->seeding = seeding;
        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
        fs_devices->total_rw_bytes = 0;
out:
        return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        mutex_lock(&uuid_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                ret = __btrfs_open_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * Look for a btrfs signature on a device.  This may be called out of the
 * mount path, and we are not allowed to call set_blocksize during the
 * scan.  The superblock is read via the page cache.
 */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct page *page;
        void *p;
        int ret = -EINVAL;
        u64 devid;
        u64 transid;
        u64 total_devices;
        u64 bytenr;
        pgoff_t index;

        /*
         * we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        bytenr = btrfs_sb_offset(0);
        flags |= FMODE_EXCL;
        mutex_lock(&uuid_mutex);

        bdev = blkdev_get_by_path(path, flags, holder);

        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        /* make sure our super fits in the device */
        if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
                goto error_bdev_put;

        /* make sure our super fits in the page */
        if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
                goto error_bdev_put;

        /* make sure our super doesn't straddle pages on disk */
        index = bytenr >> PAGE_CACHE_SHIFT;
        if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
                goto error_bdev_put;

        /* pull in the page with our super */
        page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
                                   index, GFP_NOFS);

        if (IS_ERR_OR_NULL(page))
                goto error_bdev_put;

        p = kmap(page);

        /* align our pointer to the offset of the super block */
        disk_super = p + (bytenr & ~PAGE_CACHE_MASK);

        if (btrfs_super_bytenr(disk_super) != bytenr ||
            btrfs_super_magic(disk_super) != BTRFS_MAGIC)
                goto error_unmap;

        devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        total_devices = btrfs_super_num_devices(disk_super);

        if (disk_super->label[0]) {
                if (disk_super->label[BTRFS_LABEL_SIZE - 1])
                        disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
                printk(KERN_INFO "device label %s ", disk_super->label);
        } else {
                printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
        }

        printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);

        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
        if (!ret && fs_devices_ret)
                (*fs_devices_ret)->total_devices = total_devices;

error_unmap:
        kunmap(page);
        page_cache_release(page);

error_bdev_put:
        blkdev_put(bdev, flags);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 extent_end;
        int ret;
        int slot;
        struct extent_buffer *l;

        *length = 0;

        if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 2;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

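                /*
                 * Classify how the dev extent [key.offset, extent_end)
                 * overlaps the queried range [start, end]: covering the
                 * whole range, overlapping its left edge, fully inside,
                 * or overlapping its right edge.
                 */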
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (key.offset <= start && extent_end > end) {
                        *length = end - start + 1;
                        break;
                } else if (key.offset <= start && extent_end > start)
                        *length += extent_end - start;
                else if (key.offset > start && extent_end <= end)
                        *length += extent_end - key.offset;
                else if (key.offset > start && key.offset <= end) {
                        *length += end - key.offset + 1;
                        break;
                } else if (key.offset > end)
                        break;

next:
                path->slots[0]++;
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

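/*
 * Check whether [*start, *start + len) collides with a chunk allocated
 * earlier in this transaction but not yet committed to the dev-extent
 * tree.  If so, bump *start past the colliding stripe and return 1 so
 * the caller can retry with the new offset.
 */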
static int contains_pending_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_device *device,
                                   u64 *start, u64 len)
{
        struct extent_map *em;
        int ret = 0;

        list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
                struct map_lookup *map;
                int i;

                map = (struct map_lookup *)em->bdev;
                for (i = 0; i < map->num_stripes; i++) {
                        if (map->stripes[i].dev != device)
                                continue;
                        if (map->stripes[i].physical >= *start + len ||
                            map->stripes[i].physical + em->orig_block_len <=
                            *start)
                                continue;
                        *start = map->stripes[i].physical +
                                em->orig_block_len;
                        ret = 1;
                }
        }

        return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:     the device which we search the free space in
 * @num_bytes:  the size of the free space that we need
 * @start:      store the start of the free space.
 * @len:        the size of the free space that we find, or the size of the
 *              max free space if we don't find suitable free space
 *
 * this uses a pretty simple search; the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_start;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        max_hole_start = search_start;
        max_hole_size = 0;
        hole_size = 0;

        if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
                ret = -ENOSPC;
                goto out;
        }

        path->reada = 2;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid, key.type);
                if (ret < 0)
                        goto out;
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;

                        /*
                         * Have to check before we set max_hole_start, otherwise
                         * we could end up sending back this offset anyway.
                         */
                        if (contains_pending_extent(trans, device,
                                                    &search_start,
                                                    hole_size))
                                hole_size = 0;

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * so far, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size.  Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start)
                hole_size = search_end - search_start;

        if (hole_size > max_hole_size) {
                max_hole_start = search_start;
                max_hole_size = hole_size;
        }

        if (contains_pending_extent(trans, device, &search_start, hole_size)) {
                btrfs_release_path(path);
                goto again;
        }

        /* See above. */
        if (hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                key = found_key;
                btrfs_release_path(path);
                goto again;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        } else {
                btrfs_error(root->fs_info, ret, "Slot search failed");
                goto out;
        }

        if (device->bytes_used > 0) {
                u64 len = btrfs_dev_extent_length(leaf, extent);
                device->bytes_used -= len;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += len;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        ret = btrfs_del_item(trans, root, path);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to remove dev extent item");
        }
out:
        btrfs_free_path(path);
        return ret;
}

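/*
 * Record that [start, start + num_bytes) of @device backs the chunk at
 * @chunk_offset by inserting a BTRFS_DEV_EXTENT_KEY item into the device
 * tree.
 */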
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_device *device,
                                  u64 chunk_tree, u64 chunk_objectid,
                                  u64 chunk_offset, u64 start, u64 num_bytes)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        WARN_ON(device->is_tgtdev_for_dev_replace);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}

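/*
 * Return the logical address just past the highest chunk mapping in the
 * extent map tree, i.e. the lowest offset where a new chunk can start.
 */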
1323 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1324 {
1325         struct extent_map_tree *em_tree;
1326         struct extent_map *em;
1327         struct rb_node *n;
1328         u64 ret = 0;
1329
1330         em_tree = &fs_info->mapping_tree.map_tree;
1331         read_lock(&em_tree->lock);
1332         n = rb_last(&em_tree->map);
1333         if (n) {
1334                 em = rb_entry(n, struct extent_map, rb_node);
1335                 ret = em->start + em->len;
1336         }
1337         read_unlock(&em_tree->lock);
1338
1339         return ret;
1340 }
1341
1342 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1343                                     u64 *devid_ret)
1344 {
1345         int ret;
1346         struct btrfs_key key;
1347         struct btrfs_key found_key;
1348         struct btrfs_path *path;
1349
1350         path = btrfs_alloc_path();
1351         if (!path)
1352                 return -ENOMEM;
1353
1354         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1355         key.type = BTRFS_DEV_ITEM_KEY;
1356         key.offset = (u64)-1;
1357
1358         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1359         if (ret < 0)
1360                 goto error;
1361
1362         BUG_ON(ret == 0); /* Corruption */
1363
1364         ret = btrfs_previous_item(fs_info->chunk_root, path,
1365                                   BTRFS_DEV_ITEMS_OBJECTID,
1366                                   BTRFS_DEV_ITEM_KEY);
1367         if (ret) {
1368                 *devid_ret = 1;
1369         } else {
1370                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1371                                       path->slots[0]);
1372                 *devid_ret = found_key.offset + 1;
1373         }
1374         ret = 0;
1375 error:
1376         btrfs_free_path(path);
1377         return ret;
1378 }
1379
1380 /*
1381  * the device information is stored in the chunk root
1382  * the btrfs_device struct should be fully filled in
1383  */
1384 static int btrfs_add_device(struct btrfs_trans_handle *trans,
1385                             struct btrfs_root *root,
1386                             struct btrfs_device *device)
1387 {
1388         int ret;
1389         struct btrfs_path *path;
1390         struct btrfs_dev_item *dev_item;
1391         struct extent_buffer *leaf;
1392         struct btrfs_key key;
1393         unsigned long ptr;
1394
1395         root = root->fs_info->chunk_root;
1396
1397         path = btrfs_alloc_path();
1398         if (!path)
1399                 return -ENOMEM;
1400
1401         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1402         key.type = BTRFS_DEV_ITEM_KEY;
1403         key.offset = device->devid;
1404
1405         ret = btrfs_insert_empty_item(trans, root, path, &key,
1406                                       sizeof(*dev_item));
1407         if (ret)
1408                 goto out;
1409
1410         leaf = path->nodes[0];
1411         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1412
1413         btrfs_set_device_id(leaf, dev_item, device->devid);
1414         btrfs_set_device_generation(leaf, dev_item, 0);
1415         btrfs_set_device_type(leaf, dev_item, device->type);
1416         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1417         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1418         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1419         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1420         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1421         btrfs_set_device_group(leaf, dev_item, 0);
1422         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1423         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1424         btrfs_set_device_start_offset(leaf, dev_item, 0);
1425
1426         ptr = btrfs_device_uuid(dev_item);
1427         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1428         ptr = btrfs_device_fsid(dev_item);
1429         write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
1430         btrfs_mark_buffer_dirty(leaf);
1431
1432         ret = 0;
1433 out:
1434         btrfs_free_path(path);
1435         return ret;
1436 }
1437
1438 static int btrfs_rm_dev_item(struct btrfs_root *root,
1439                              struct btrfs_device *device)
1440 {
1441         int ret;
1442         struct btrfs_path *path;
1443         struct btrfs_key key;
1444         struct btrfs_trans_handle *trans;
1445
1446         root = root->fs_info->chunk_root;
1447
1448         path = btrfs_alloc_path();
1449         if (!path)
1450                 return -ENOMEM;
1451
1452         trans = btrfs_start_transaction(root, 0);
1453         if (IS_ERR(trans)) {
1454                 btrfs_free_path(path);
1455                 return PTR_ERR(trans);
1456         }
1457         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1458         key.type = BTRFS_DEV_ITEM_KEY;
1459         key.offset = device->devid;
1460         lock_chunks(root);
1461
1462         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1463         if (ret < 0)
1464                 goto out;
1465
1466         if (ret > 0) {
1467                 ret = -ENOENT;
1468                 goto out;
1469         }
1470
1471         ret = btrfs_del_item(trans, root, path);
1472         if (ret)
1473                 goto out;
1474 out:
1475         btrfs_free_path(path);
1476         unlock_chunks(root);
1477         btrfs_commit_transaction(trans, root);
1478         return ret;
1479 }
1480
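/*
 * Remove a device from a mounted filesystem.  @device_path may be the
 * literal string "missing", which picks the first device that is
 * recorded in the metadata but has no backing bdev.  The minimum device
 * counts of the RAID profiles in use are enforced first; the device is
 * then shrunk to zero, its dev item is deleted and the magic in its
 * superblock is cleared so it is no longer detected as part of the fs.
 */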
1481 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1482 {
1483         struct btrfs_device *device;
1484         struct btrfs_device *next_device;
1485         struct block_device *bdev;
1486         struct buffer_head *bh = NULL;
1487         struct btrfs_super_block *disk_super;
1488         struct btrfs_fs_devices *cur_devices;
1489         u64 all_avail;
1490         u64 devid;
1491         u64 num_devices;
1492         u8 *dev_uuid;
1493         unsigned seq;
1494         int ret = 0;
1495         bool clear_super = false;
1496
1497         mutex_lock(&uuid_mutex);
1498
1499         do {
1500                 seq = read_seqbegin(&root->fs_info->profiles_lock);
1501
1502                 all_avail = root->fs_info->avail_data_alloc_bits |
1503                             root->fs_info->avail_system_alloc_bits |
1504                             root->fs_info->avail_metadata_alloc_bits;
1505         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
1506
1507         num_devices = root->fs_info->fs_devices->num_devices;
1508         btrfs_dev_replace_lock(&root->fs_info->dev_replace);
1509         if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
1510                 WARN_ON(num_devices < 1);
1511                 num_devices--;
1512         }
1513         btrfs_dev_replace_unlock(&root->fs_info->dev_replace);
1514
1515         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
1516                 ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
1517                 goto out;
1518         }
1519
1520         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
1521                 ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
1522                 goto out;
1523         }
1524
1525         if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
1526             root->fs_info->fs_devices->rw_devices <= 2) {
1527                 ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
1528                 goto out;
1529         }
1530         if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
1531             root->fs_info->fs_devices->rw_devices <= 3) {
1532                 ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
1533                 goto out;
1534         }
1535
1536         if (strcmp(device_path, "missing") == 0) {
1537                 struct list_head *devices;
1538                 struct btrfs_device *tmp;
1539
1540                 device = NULL;
1541                 devices = &root->fs_info->fs_devices->devices;
1542                 /*
1543                  * It is safe to read the devices since the volume_mutex
1544                  * is held by the caller.
1545                  */
1546                 list_for_each_entry(tmp, devices, dev_list) {
1547                         if (tmp->in_fs_metadata &&
1548                             !tmp->is_tgtdev_for_dev_replace &&
1549                             !tmp->bdev) {
1550                                 device = tmp;
1551                                 break;
1552                         }
1553                 }
1554                 bdev = NULL;
1555                 bh = NULL;
1556                 disk_super = NULL;
1557                 if (!device) {
1558                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
1559                         goto out;
1560                 }
1561         } else {
1562                 ret = btrfs_get_bdev_and_sb(device_path,
1563                                             FMODE_WRITE | FMODE_EXCL,
1564                                             root->fs_info->bdev_holder, 0,
1565                                             &bdev, &bh);
1566                 if (ret)
1567                         goto out;
1568                 disk_super = (struct btrfs_super_block *)bh->b_data;
1569                 devid = btrfs_stack_device_id(&disk_super->dev_item);
1570                 dev_uuid = disk_super->dev_item.uuid;
1571                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1572                                            disk_super->fsid);
1573                 if (!device) {
1574                         ret = -ENOENT;
1575                         goto error_brelse;
1576                 }
1577         }
1578
1579         if (device->is_tgtdev_for_dev_replace) {
1580                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
1581                 goto error_brelse;
1582         }
1583
1584         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1585                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
1586                 goto error_brelse;
1587         }
1588
1589         if (device->writeable) {
1590                 lock_chunks(root);
1591                 list_del_init(&device->dev_alloc_list);
1592                 unlock_chunks(root);
1593                 root->fs_info->fs_devices->rw_devices--;
1594                 clear_super = true;
1595         }
1596
1597         mutex_unlock(&uuid_mutex);
1598         ret = btrfs_shrink_device(device, 0);
1599         mutex_lock(&uuid_mutex);
1600         if (ret)
1601                 goto error_undo;
1602
1603         /*
1604          * TODO: the superblock still includes this device in its num_devices
1605          * counter although write_all_supers() is not locked out. This
1606          * could give a filesystem state which requires a degraded mount.
1607          */
1608         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1609         if (ret)
1610                 goto error_undo;
1611
1612         spin_lock(&root->fs_info->free_chunk_lock);
1613         root->fs_info->free_chunk_space = device->total_bytes -
1614                 device->bytes_used;
1615         spin_unlock(&root->fs_info->free_chunk_lock);
1616
1617         device->in_fs_metadata = 0;
1618         btrfs_scrub_cancel_dev(root->fs_info, device);
1619
1620         /*
1621          * the device list mutex makes sure that we don't change
1622          * the device list while someone else is writing out all
1623  * the device supers. Whoever is writing all supers should
1624          * lock the device list mutex before getting the number of
1625          * devices in the super block (super_copy). Conversely,
1626          * whoever updates the number of devices in the super block
1627          * (super_copy) should hold the device list mutex.
1628          */
1629
1630         cur_devices = device->fs_devices;
1631         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1632         list_del_rcu(&device->dev_list);
1633
1634         device->fs_devices->num_devices--;
1635         device->fs_devices->total_devices--;
1636
1637         if (device->missing)
1638                 root->fs_info->fs_devices->missing_devices--;
1639
1640         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1641                                  struct btrfs_device, dev_list);
1642         if (device->bdev == root->fs_info->sb->s_bdev)
1643                 root->fs_info->sb->s_bdev = next_device->bdev;
1644         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1645                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1646
1647         if (device->bdev)
1648                 device->fs_devices->open_devices--;
1649
1650         call_rcu(&device->rcu, free_device);
1651
1652         num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1653         btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1654         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1655
1656         if (cur_devices->open_devices == 0) {
1657                 struct btrfs_fs_devices *fs_devices;
1658                 fs_devices = root->fs_info->fs_devices;
1659                 while (fs_devices) {
1660                         if (fs_devices->seed == cur_devices)
1661                                 break;
1662                         fs_devices = fs_devices->seed;
1663                 }
1664                 fs_devices->seed = cur_devices->seed;
1665                 cur_devices->seed = NULL;
1666                 lock_chunks(root);
1667                 __btrfs_close_devices(cur_devices);
1668                 unlock_chunks(root);
1669                 free_fs_devices(cur_devices);
1670         }
1671
1672         root->fs_info->num_tolerated_disk_barrier_failures =
1673                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
1674
1675         /*
1676          * at this point, the device is zero sized.  We want to
1677          * remove it from the devices list and zero out the old super
1678          */
1679         if (clear_super && disk_super) {
1680                 /* make sure this device isn't detected as part of
1681                  * the FS anymore
1682                  */
1683                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1684                 set_buffer_dirty(bh);
1685                 sync_dirty_buffer(bh);
1686         }
1687
1688         ret = 0;
1689
1690         /* Notify udev that device has changed */
1691         if (bdev)
1692                 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
1693
1694 error_brelse:
1695         brelse(bh);
1696         if (bdev)
1697                 blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
1698 out:
1699         mutex_unlock(&uuid_mutex);
1700         return ret;
1701 error_undo:
1702         if (device->writeable) {
1703                 lock_chunks(root);
1704                 list_add(&device->dev_alloc_list,
1705                          &root->fs_info->fs_devices->alloc_list);
1706                 unlock_chunks(root);
1707                 root->fs_info->fs_devices->rw_devices++;
1708         }
1709         goto error_brelse;
1710 }
1711
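/*
 * Unlink the source device of a finished device replace from the device
 * lists and adjust the counters.  device_list_mutex must be held by the
 * caller, see the WARN_ON below.
 */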
1712 void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1713                                  struct btrfs_device *srcdev)
1714 {
1715         WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
1716         list_del_rcu(&srcdev->dev_list);
1717         list_del_rcu(&srcdev->dev_alloc_list);
1718         fs_info->fs_devices->num_devices--;
1719         if (srcdev->missing) {
1720                 fs_info->fs_devices->missing_devices--;
1721                 fs_info->fs_devices->rw_devices++;
1722         }
1723         if (srcdev->can_discard)
1724                 fs_info->fs_devices->num_can_discard--;
1725         if (srcdev->bdev)
1726                 fs_info->fs_devices->open_devices--;
1727
1728         call_rcu(&srcdev->rcu, free_device);
1729 }
1730
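/*
 * Tear down a device replace target that is no longer needed: scratch
 * its superblock, drop it from the device list and repoint s_bdev /
 * latest_bdev at the next device where necessary.
 */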
1731 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
1732                                       struct btrfs_device *tgtdev)
1733 {
1734         struct btrfs_device *next_device;
1735
1736         WARN_ON(!tgtdev);
1737         mutex_lock(&fs_info->fs_devices->device_list_mutex);
1738         if (tgtdev->bdev) {
1739                 btrfs_scratch_superblock(tgtdev);
1740                 fs_info->fs_devices->open_devices--;
1741         }
1742         fs_info->fs_devices->num_devices--;
1743         if (tgtdev->can_discard)
1744                 fs_info->fs_devices->num_can_discard++;
1745
1746         next_device = list_entry(fs_info->fs_devices->devices.next,
1747                                  struct btrfs_device, dev_list);
1748         if (tgtdev->bdev == fs_info->sb->s_bdev)
1749                 fs_info->sb->s_bdev = next_device->bdev;
1750         if (tgtdev->bdev == fs_info->fs_devices->latest_bdev)
1751                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1752         list_del_rcu(&tgtdev->dev_list);
1753
1754         call_rcu(&tgtdev->rcu, free_device);
1755
1756         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1757 }
1758
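/*
 * Read the superblock at @device_path and look up the matching
 * btrfs_device by devid, device uuid and fsid.  Returns -ENOENT if the
 * device is not part of this filesystem.
 */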
1759 static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
1760                                      struct btrfs_device **device)
1761 {
1762         int ret = 0;
1763         struct btrfs_super_block *disk_super;
1764         u64 devid;
1765         u8 *dev_uuid;
1766         struct block_device *bdev;
1767         struct buffer_head *bh;
1768
1769         *device = NULL;
1770         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
1771                                     root->fs_info->bdev_holder, 0, &bdev, &bh);
1772         if (ret)
1773                 return ret;
1774         disk_super = (struct btrfs_super_block *)bh->b_data;
1775         devid = btrfs_stack_device_id(&disk_super->dev_item);
1776         dev_uuid = disk_super->dev_item.uuid;
1777         *device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1778                                     disk_super->fsid);
1779         brelse(bh);
1780         if (!*device)
1781                 ret = -ENOENT;
1782         blkdev_put(bdev, FMODE_READ);
1783         return ret;
1784 }
1785
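/*
 * Like btrfs_find_device_by_path(), except that the special path
 * "missing" selects the first device that is in the metadata but has
 * no bdev.
 */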
1786 int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
1787                                          char *device_path,
1788                                          struct btrfs_device **device)
1789 {
1790         *device = NULL;
1791         if (strcmp(device_path, "missing") == 0) {
1792                 struct list_head *devices;
1793                 struct btrfs_device *tmp;
1794
1795                 devices = &root->fs_info->fs_devices->devices;
1796                 /*
1797                  * It is safe to read the devices since the volume_mutex
1798                  * is held by the caller.
1799                  */
1800                 list_for_each_entry(tmp, devices, dev_list) {
1801                         if (tmp->in_fs_metadata && !tmp->bdev) {
1802                                 *device = tmp;
1803                                 break;
1804                         }
1805                 }
1806
1807                 if (!*device) {
1808                         pr_err("btrfs: no missing device found\n");
1809                         return -ENOENT;
1810                 }
1811
1812                 return 0;
1813         } else {
1814                 return btrfs_find_device_by_path(root, device_path, device);
1815         }
1816 }
1817
1818 /*
1819  * does all the dirty work required for changing the file system's UUID.
1820  */
1821 static int btrfs_prepare_sprout(struct btrfs_root *root)
1822 {
1823         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1824         struct btrfs_fs_devices *old_devices;
1825         struct btrfs_fs_devices *seed_devices;
1826         struct btrfs_super_block *disk_super = root->fs_info->super_copy;
1827         struct btrfs_device *device;
1828         u64 super_flags;
1829
1830         BUG_ON(!mutex_is_locked(&uuid_mutex));
1831         if (!fs_devices->seeding)
1832                 return -EINVAL;
1833
1834         seed_devices = __alloc_fs_devices();
1835         if (IS_ERR(seed_devices))
1836                 return PTR_ERR(seed_devices);
1837
1838         old_devices = clone_fs_devices(fs_devices);
1839         if (IS_ERR(old_devices)) {
1840                 kfree(seed_devices);
1841                 return PTR_ERR(old_devices);
1842         }
1843
1844         list_add(&old_devices->list, &fs_uuids);
1845
1846         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1847         seed_devices->opened = 1;
1848         INIT_LIST_HEAD(&seed_devices->devices);
1849         INIT_LIST_HEAD(&seed_devices->alloc_list);
1850         mutex_init(&seed_devices->device_list_mutex);
1851
1852         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1853         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1854                               synchronize_rcu);
1855
1856         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1857         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1858                 device->fs_devices = seed_devices;
1859         }
1860
1861         fs_devices->seeding = 0;
1862         fs_devices->num_devices = 0;
1863         fs_devices->open_devices = 0;
1864         fs_devices->total_devices = 0;
1865         fs_devices->seed = seed_devices;
1866
1867         generate_random_uuid(fs_devices->fsid);
1868         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1869         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1870         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1871
1872         super_flags = btrfs_super_flags(disk_super) &
1873                       ~BTRFS_SUPER_FLAG_SEEDING;
1874         btrfs_set_super_flags(disk_super, super_flags);
1875
1876         return 0;
1877 }
1878
1879 /*
1880  * store the expected generation for seed devices in device items.
1881  */
1882 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1883                                struct btrfs_root *root)
1884 {
1885         struct btrfs_path *path;
1886         struct extent_buffer *leaf;
1887         struct btrfs_dev_item *dev_item;
1888         struct btrfs_device *device;
1889         struct btrfs_key key;
1890         u8 fs_uuid[BTRFS_UUID_SIZE];
1891         u8 dev_uuid[BTRFS_UUID_SIZE];
1892         u64 devid;
1893         int ret;
1894
1895         path = btrfs_alloc_path();
1896         if (!path)
1897                 return -ENOMEM;
1898
1899         root = root->fs_info->chunk_root;
1900         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1901         key.offset = 0;
1902         key.type = BTRFS_DEV_ITEM_KEY;
1903
1904         while (1) {
1905                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1906                 if (ret < 0)
1907                         goto error;
1908
1909                 leaf = path->nodes[0];
1910 next_slot:
1911                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1912                         ret = btrfs_next_leaf(root, path);
1913                         if (ret > 0)
1914                                 break;
1915                         if (ret < 0)
1916                                 goto error;
1917                         leaf = path->nodes[0];
1918                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1919                         btrfs_release_path(path);
1920                         continue;
1921                 }
1922
1923                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1924                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1925                     key.type != BTRFS_DEV_ITEM_KEY)
1926                         break;
1927
1928                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1929                                           struct btrfs_dev_item);
1930                 devid = btrfs_device_id(leaf, dev_item);
1931                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
1932                                    BTRFS_UUID_SIZE);
1933                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
1934                                    BTRFS_UUID_SIZE);
1935                 device = btrfs_find_device(root->fs_info, devid, dev_uuid,
1936                                            fs_uuid);
1937                 BUG_ON(!device); /* Logic error */
1938
1939                 if (device->fs_devices->seeding) {
1940                         btrfs_set_device_generation(leaf, dev_item,
1941                                                     device->generation);
1942                         btrfs_mark_buffer_dirty(leaf);
1943                 }
1944
1945                 path->slots[0]++;
1946                 goto next_slot;
1947         }
1948         ret = 0;
1949 error:
1950         btrfs_free_path(path);
1951         return ret;
1952 }
1953
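/*
 * Add a new writeable device at @device_path to a mounted filesystem.
 * If the filesystem is a seed, it is sprouted: a fresh fsid is
 * generated and the old devices become the read-only seed of the new
 * filesystem.
 */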
1954 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1955 {
1956         struct request_queue *q;
1957         struct btrfs_trans_handle *trans;
1958         struct btrfs_device *device;
1959         struct block_device *bdev;
1960         struct list_head *devices;
1961         struct super_block *sb = root->fs_info->sb;
1962         struct rcu_string *name;
1963         u64 total_bytes;
1964         int seeding_dev = 0;
1965         int ret = 0;
1966
1967         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1968                 return -EROFS;
1969
1970         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1971                                   root->fs_info->bdev_holder);
1972         if (IS_ERR(bdev))
1973                 return PTR_ERR(bdev);
1974
1975         if (root->fs_info->fs_devices->seeding) {
1976                 seeding_dev = 1;
1977                 down_write(&sb->s_umount);
1978                 mutex_lock(&uuid_mutex);
1979         }
1980
1981         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1982
1983         devices = &root->fs_info->fs_devices->devices;
1984
1985         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1986         list_for_each_entry(device, devices, dev_list) {
1987                 if (device->bdev == bdev) {
1988                         ret = -EEXIST;
1989                         mutex_unlock(
1990                                 &root->fs_info->fs_devices->device_list_mutex);
1991                         goto error;
1992                 }
1993         }
1994         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1995
1996         device = btrfs_alloc_device(root->fs_info, NULL, NULL);
1997         if (IS_ERR(device)) {
1998                 /* we can safely leave the fs_devices entry around */
1999                 ret = PTR_ERR(device);
2000                 goto error;
2001         }
2002
2003         name = rcu_string_strdup(device_path, GFP_NOFS);
2004         if (!name) {
2005                 kfree(device);
2006                 ret = -ENOMEM;
2007                 goto error;
2008         }
2009         rcu_assign_pointer(device->name, name);
2010
2011         trans = btrfs_start_transaction(root, 0);
2012         if (IS_ERR(trans)) {
2013                 rcu_string_free(device->name);
2014                 kfree(device);
2015                 ret = PTR_ERR(trans);
2016                 goto error;
2017         }
2018
2019         lock_chunks(root);
2020
2021         q = bdev_get_queue(bdev);
2022         if (blk_queue_discard(q))
2023                 device->can_discard = 1;
2024         device->writeable = 1;
2025         device->generation = trans->transid;
2026         device->io_width = root->sectorsize;
2027         device->io_align = root->sectorsize;
2028         device->sector_size = root->sectorsize;
2029         device->total_bytes = i_size_read(bdev->bd_inode);
2030         device->disk_total_bytes = device->total_bytes;
2031         device->dev_root = root->fs_info->dev_root;
2032         device->bdev = bdev;
2033         device->in_fs_metadata = 1;
2034         device->is_tgtdev_for_dev_replace = 0;
2035         device->mode = FMODE_EXCL;
2036         set_blocksize(device->bdev, 4096);
2037
2038         if (seeding_dev) {
2039                 sb->s_flags &= ~MS_RDONLY;
2040                 ret = btrfs_prepare_sprout(root);
2041                 BUG_ON(ret); /* -ENOMEM */
2042         }
2043
2044         device->fs_devices = root->fs_info->fs_devices;
2045
2046         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2047         list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
2048         list_add(&device->dev_alloc_list,
2049                  &root->fs_info->fs_devices->alloc_list);
2050         root->fs_info->fs_devices->num_devices++;
2051         root->fs_info->fs_devices->open_devices++;
2052         root->fs_info->fs_devices->rw_devices++;
2053         root->fs_info->fs_devices->total_devices++;
2054         if (device->can_discard)
2055                 root->fs_info->fs_devices->num_can_discard++;
2056         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
2057
2058         spin_lock(&root->fs_info->free_chunk_lock);
2059         root->fs_info->free_chunk_space += device->total_bytes;
2060         spin_unlock(&root->fs_info->free_chunk_lock);
2061
2062         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
2063                 root->fs_info->fs_devices->rotating = 1;
2064
2065         total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
2066         btrfs_set_super_total_bytes(root->fs_info->super_copy,
2067                                     total_bytes + device->total_bytes);
2068
2069         total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
2070         btrfs_set_super_num_devices(root->fs_info->super_copy,
2071                                     total_bytes + 1);
2072         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2073
2074         if (seeding_dev) {
2075                 ret = init_first_rw_device(trans, root, device);
2076                 if (ret) {
2077                         btrfs_abort_transaction(trans, root, ret);
2078                         goto error_trans;
2079                 }
2080                 ret = btrfs_finish_sprout(trans, root);
2081                 if (ret) {
2082                         btrfs_abort_transaction(trans, root, ret);
2083                         goto error_trans;
2084                 }
2085         } else {
2086                 ret = btrfs_add_device(trans, root, device);
2087                 if (ret) {
2088                         btrfs_abort_transaction(trans, root, ret);
2089                         goto error_trans;
2090                 }
2091         }
2092
2093         /*
2094          * we've got more storage, clear any full flags on the space
2095          * infos
2096          */
2097         btrfs_clear_space_info_full(root->fs_info);
2098
2099         unlock_chunks(root);
2100         root->fs_info->num_tolerated_disk_barrier_failures =
2101                 btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
2102         ret = btrfs_commit_transaction(trans, root);
2103
2104         if (seeding_dev) {
2105                 mutex_unlock(&uuid_mutex);
2106                 up_write(&sb->s_umount);
2107
2108                 if (ret) /* transaction commit */
2109                         return ret;
2110
2111                 ret = btrfs_relocate_sys_chunks(root);
2112                 if (ret < 0)
2113                         btrfs_error(root->fs_info, ret,
2114                                     "Failed to relocate sys chunks after "
2115                                     "device initialization. This can be fixed "
2116                                     "using the \"btrfs balance\" command.");
2117                 trans = btrfs_attach_transaction(root);
2118                 if (IS_ERR(trans)) {
2119                         if (PTR_ERR(trans) == -ENOENT)
2120                                 return 0;
2121                         return PTR_ERR(trans);
2122                 }
2123                 ret = btrfs_commit_transaction(trans, root);
2124         }
2125
2126         return ret;
2127
2128 error_trans:
2129         unlock_chunks(root);
2130         btrfs_end_transaction(trans, root);
2131         rcu_string_free(device->name);
2132         kfree(device);
2133 error:
2134         blkdev_put(bdev, FMODE_EXCL);
2135         if (seeding_dev) {
2136                 mutex_unlock(&uuid_mutex);
2137                 up_write(&sb->s_umount);
2138         }
2139         return ret;
2140 }
2141
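/*
 * Open @device_path and set it up as the target of a device replace
 * (devid BTRFS_DEV_REPLACE_DEVID).  The target is put on the device
 * list but gets neither a dev_alloc_list entry nor a dev item; seed
 * filesystems are rejected.
 */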
2142 int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
2143                                   struct btrfs_device **device_out)
2144 {
2145         struct request_queue *q;
2146         struct btrfs_device *device;
2147         struct block_device *bdev;
2148         struct btrfs_fs_info *fs_info = root->fs_info;
2149         struct list_head *devices;
2150         struct rcu_string *name;
2151         u64 devid = BTRFS_DEV_REPLACE_DEVID;
2152         int ret = 0;
2153
2154         *device_out = NULL;
2155         if (fs_info->fs_devices->seeding)
2156                 return -EINVAL;
2157
2158         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2159                                   fs_info->bdev_holder);
2160         if (IS_ERR(bdev))
2161                 return PTR_ERR(bdev);
2162
2163         filemap_write_and_wait(bdev->bd_inode->i_mapping);
2164
2165         devices = &fs_info->fs_devices->devices;
2166         list_for_each_entry(device, devices, dev_list) {
2167                 if (device->bdev == bdev) {
2168                         ret = -EEXIST;
2169                         goto error;
2170                 }
2171         }
2172
2173         device = btrfs_alloc_device(NULL, &devid, NULL);
2174         if (IS_ERR(device)) {
2175                 ret = PTR_ERR(device);
2176                 goto error;
2177         }
2178
2179         name = rcu_string_strdup(device_path, GFP_NOFS);
2180         if (!name) {
2181                 kfree(device);
2182                 ret = -ENOMEM;
2183                 goto error;
2184         }
2185         rcu_assign_pointer(device->name, name);
2186
2187         q = bdev_get_queue(bdev);
2188         if (blk_queue_discard(q))
2189                 device->can_discard = 1;
2190         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2191         device->writeable = 1;
2192         device->generation = 0;
2193         device->io_width = root->sectorsize;
2194         device->io_align = root->sectorsize;
2195         device->sector_size = root->sectorsize;
2196         device->total_bytes = i_size_read(bdev->bd_inode);
2197         device->disk_total_bytes = device->total_bytes;
2198         device->dev_root = fs_info->dev_root;
2199         device->bdev = bdev;
2200         device->in_fs_metadata = 1;
2201         device->is_tgtdev_for_dev_replace = 1;
2202         device->mode = FMODE_EXCL;
2203         set_blocksize(device->bdev, 4096);
2204         device->fs_devices = fs_info->fs_devices;
2205         list_add(&device->dev_list, &fs_info->fs_devices->devices);
2206         fs_info->fs_devices->num_devices++;
2207         fs_info->fs_devices->open_devices++;
2208         if (device->can_discard)
2209                 fs_info->fs_devices->num_can_discard++;
2210         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2211
2212         *device_out = device;
2213         return ret;
2214
2215 error:
2216         blkdev_put(bdev, FMODE_EXCL);
2217         return ret;
2218 }
2219
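/*
 * Re-initialize the runtime fields of a replace target device when a
 * suspended device replace is resumed.
 */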
2220 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
2221                                               struct btrfs_device *tgtdev)
2222 {
2223         WARN_ON(fs_info->fs_devices->rw_devices == 0);
2224         tgtdev->io_width = fs_info->dev_root->sectorsize;
2225         tgtdev->io_align = fs_info->dev_root->sectorsize;
2226         tgtdev->sector_size = fs_info->dev_root->sectorsize;
2227         tgtdev->dev_root = fs_info->dev_root;
2228         tgtdev->in_fs_metadata = 1;
2229 }
2230
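/*
 * Write the in-memory state of @device back to its dev item in the
 * chunk tree.  Note that the item's total_bytes is set from
 * disk_total_bytes, not total_bytes.
 */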
2231 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2232                                         struct btrfs_device *device)
2233 {
2234         int ret;
2235         struct btrfs_path *path;
2236         struct btrfs_root *root;
2237         struct btrfs_dev_item *dev_item;
2238         struct extent_buffer *leaf;
2239         struct btrfs_key key;
2240
2241         root = device->dev_root->fs_info->chunk_root;
2242
2243         path = btrfs_alloc_path();
2244         if (!path)
2245                 return -ENOMEM;
2246
2247         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2248         key.type = BTRFS_DEV_ITEM_KEY;
2249         key.offset = device->devid;
2250
2251         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2252         if (ret < 0)
2253                 goto out;
2254
2255         if (ret > 0) {
2256                 ret = -ENOENT;
2257                 goto out;
2258         }
2259
2260         leaf = path->nodes[0];
2261         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2262
2263         btrfs_set_device_id(leaf, dev_item, device->devid);
2264         btrfs_set_device_type(leaf, dev_item, device->type);
2265         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2266         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2267         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2268         btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
2269         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
2270         btrfs_mark_buffer_dirty(leaf);
2271
2272 out:
2273         btrfs_free_path(path);
2274         return ret;
2275 }
2276
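/*
 * Grow @device to @new_size and update the superblock total and the
 * dev item accordingly.  Shrinking and replace targets are rejected.
 * Callers must hold the chunk mutex; btrfs_grow_device() below is the
 * locked wrapper.
 */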
2277 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
2278                       struct btrfs_device *device, u64 new_size)
2279 {
2280         struct btrfs_super_block *super_copy =
2281                 device->dev_root->fs_info->super_copy;
2282         u64 old_total = btrfs_super_total_bytes(super_copy);
2283         u64 diff = new_size - device->total_bytes;
2284
2285         if (!device->writeable)
2286                 return -EACCES;
2287         if (new_size <= device->total_bytes ||
2288             device->is_tgtdev_for_dev_replace)
2289                 return -EINVAL;
2290
2291         btrfs_set_super_total_bytes(super_copy, old_total + diff);
2292         device->fs_devices->total_rw_bytes += diff;
2293
2294         device->total_bytes = new_size;
2295         device->disk_total_bytes = new_size;
2296         btrfs_clear_space_info_full(device->dev_root->fs_info);
2297
2298         return btrfs_update_device(trans, device);
2299 }
2300
2301 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2302                       struct btrfs_device *device, u64 new_size)
2303 {
2304         int ret;
2305         lock_chunks(device->dev_root);
2306         ret = __btrfs_grow_device(trans, device, new_size);
2307         unlock_chunks(device->dev_root);
2308         return ret;
2309 }
2310
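/*
 * Delete the chunk item at @chunk_offset from the chunk tree.
 */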
2311 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
2312                             struct btrfs_root *root,
2313                             u64 chunk_tree, u64 chunk_objectid,
2314                             u64 chunk_offset)
2315 {
2316         int ret;
2317         struct btrfs_path *path;
2318         struct btrfs_key key;
2319
2320         root = root->fs_info->chunk_root;
2321         path = btrfs_alloc_path();
2322         if (!path)
2323                 return -ENOMEM;
2324
2325         key.objectid = chunk_objectid;
2326         key.offset = chunk_offset;
2327         key.type = BTRFS_CHUNK_ITEM_KEY;
2328
2329         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2330         if (ret < 0)
2331                 goto out;
2332         else if (ret > 0) { /* Logic error or corruption */
2333                 btrfs_error(root->fs_info, -ENOENT,
2334                             "Failed lookup while freeing chunk.");
2335                 ret = -ENOENT;
2336                 goto out;
2337         }
2338
2339         ret = btrfs_del_item(trans, root, path);
2340         if (ret < 0)
2341                 btrfs_error(root->fs_info, ret,
2342                             "Failed to delete chunk item.");
2343 out:
2344         btrfs_free_path(path);
2345         return ret;
2346 }
2347
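/*
 * Remove the copy of the chunk stored in the superblock's
 * sys_chunk_array by shifting the rest of the array over it.
 */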
2348 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
2349                                u64 chunk_offset)
2350 {
2351         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
2352         struct btrfs_disk_key *disk_key;
2353         struct btrfs_chunk *chunk;
2354         u8 *ptr;
2355         int ret = 0;
2356         u32 num_stripes;
2357         u32 array_size;
2358         u32 len = 0;
2359         u32 cur;
2360         struct btrfs_key key;
2361
2362         array_size = btrfs_super_sys_array_size(super_copy);
2363
2364         ptr = super_copy->sys_chunk_array;
2365         cur = 0;
2366
2367         while (cur < array_size) {
2368                 disk_key = (struct btrfs_disk_key *)ptr;
2369                 btrfs_disk_key_to_cpu(&key, disk_key);
2370
2371                 len = sizeof(*disk_key);
2372
2373                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2374                         chunk = (struct btrfs_chunk *)(ptr + len);
2375                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2376                         len += btrfs_chunk_item_size(num_stripes);
2377                 } else {
2378                         ret = -EIO;
2379                         break;
2380                 }
2381                 if (key.objectid == chunk_objectid &&
2382                     key.offset == chunk_offset) {
2383                         memmove(ptr, ptr + len, array_size - (cur + len));
2384                         array_size -= len;
2385                         btrfs_set_super_sys_array_size(super_copy, array_size);
2386                 } else {
2387                         ptr += len;
2388                         cur += len;
2389                 }
2390         }
2391         return ret;
2392 }
2393
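/*
 * Relocate everything inside a chunk and then delete it: move the
 * extents to other block groups, free the device extents, remove the
 * chunk item (plus its sys_chunk_array copy for system chunks) and
 * drop the extent mapping.
 */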
2394 static int btrfs_relocate_chunk(struct btrfs_root *root,
2395                          u64 chunk_tree, u64 chunk_objectid,
2396                          u64 chunk_offset)
2397 {
2398         struct extent_map_tree *em_tree;
2399         struct btrfs_root *extent_root;
2400         struct btrfs_trans_handle *trans;
2401         struct extent_map *em;
2402         struct map_lookup *map;
2403         int ret;
2404         int i;
2405
2406         root = root->fs_info->chunk_root;
2407         extent_root = root->fs_info->extent_root;
2408         em_tree = &root->fs_info->mapping_tree.map_tree;
2409
2410         ret = btrfs_can_relocate(extent_root, chunk_offset);
2411         if (ret)
2412                 return -ENOSPC;
2413
2414         /* step one, relocate all the extents inside this chunk */
2415         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
2416         if (ret)
2417                 return ret;
2418
2419         trans = btrfs_start_transaction(root, 0);
2420         if (IS_ERR(trans)) {
2421                 ret = PTR_ERR(trans);
2422                 btrfs_std_error(root->fs_info, ret);
2423                 return ret;
2424         }
2425
2426         lock_chunks(root);
2427
2428         /*
2429          * step two, delete the device extents and the
2430          * chunk tree entries
2431          */
2432         read_lock(&em_tree->lock);
2433         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2434         read_unlock(&em_tree->lock);
2435
2436         BUG_ON(!em || em->start > chunk_offset ||
2437                em->start + em->len < chunk_offset);
2438         map = (struct map_lookup *)em->bdev;
2439
2440         for (i = 0; i < map->num_stripes; i++) {
2441                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2442                                             map->stripes[i].physical);
2443                 BUG_ON(ret);
2444
2445                 if (map->stripes[i].dev) {
2446                         ret = btrfs_update_device(trans, map->stripes[i].dev);
2447                         BUG_ON(ret);
2448                 }
2449         }
2450         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2451                                chunk_offset);
2452
2453         BUG_ON(ret);
2454
2455         trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2456
2457         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2458                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2459                 BUG_ON(ret);
2460         }
2461
2462         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2463         BUG_ON(ret);
2464
2465         write_lock(&em_tree->lock);
2466         remove_extent_mapping(em_tree, em);
2467         write_unlock(&em_tree->lock);
2468
2469         kfree(map);
2470         em->bdev = NULL;
2471
2472         /* once for the tree */
2473         free_extent_map(em);
2474         /* once for us */
2475         free_extent_map(em);
2476
2477         unlock_chunks(root);
2478         btrfs_end_transaction(trans, root);
2479         return 0;
2480 }
2481
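/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.
 * Chunks that fail with -ENOSPC are retried once in a second pass.
 */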
2482 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2483 {
2484         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2485         struct btrfs_path *path;
2486         struct extent_buffer *leaf;
2487         struct btrfs_chunk *chunk;
2488         struct btrfs_key key;
2489         struct btrfs_key found_key;
2490         u64 chunk_tree = chunk_root->root_key.objectid;
2491         u64 chunk_type;
2492         bool retried = false;
2493         int failed = 0;
2494         int ret;
2495
2496         path = btrfs_alloc_path();
2497         if (!path)
2498                 return -ENOMEM;
2499
2500 again:
2501         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2502         key.offset = (u64)-1;
2503         key.type = BTRFS_CHUNK_ITEM_KEY;
2504
2505         while (1) {
2506                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2507                 if (ret < 0)
2508                         goto error;
2509                 BUG_ON(ret == 0); /* Corruption */
2510
2511                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2512                                           key.type);
2513                 if (ret < 0)
2514                         goto error;
2515                 if (ret > 0)
2516                         break;
2517
2518                 leaf = path->nodes[0];
2519                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2520
2521                 chunk = btrfs_item_ptr(leaf, path->slots[0],
2522                                        struct btrfs_chunk);
2523                 chunk_type = btrfs_chunk_type(leaf, chunk);
2524                 btrfs_release_path(path);
2525
2526                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2527                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2528                                                    found_key.objectid,
2529                                                    found_key.offset);
2530                         if (ret == -ENOSPC)
2531                                 failed++;
2532                         else if (ret)
2533                                 BUG();
2534                 }
2535
2536                 if (found_key.offset == 0)
2537                         break;
2538                 key.offset = found_key.offset - 1;
2539         }
2540         ret = 0;
2541         if (failed && !retried) {
2542                 failed = 0;
2543                 retried = true;
2544                 goto again;
2545         } else if (failed && retried) {
2546                 WARN_ON(1);
2547                 ret = -ENOSPC;
2548         }
2549 error:
2550         btrfs_free_path(path);
2551         return ret;
2552 }
2553
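/*
 * Write the balance item that records the current balance args, so an
 * interrupted balance can be resumed.  Committed in its own
 * transaction.
 */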
2554 static int insert_balance_item(struct btrfs_root *root,
2555                                struct btrfs_balance_control *bctl)
2556 {
2557         struct btrfs_trans_handle *trans;
2558         struct btrfs_balance_item *item;
2559         struct btrfs_disk_balance_args disk_bargs;
2560         struct btrfs_path *path;
2561         struct extent_buffer *leaf;
2562         struct btrfs_key key;
2563         int ret, err;
2564
2565         path = btrfs_alloc_path();
2566         if (!path)
2567                 return -ENOMEM;
2568
2569         trans = btrfs_start_transaction(root, 0);
2570         if (IS_ERR(trans)) {
2571                 btrfs_free_path(path);
2572                 return PTR_ERR(trans);
2573         }
2574
2575         key.objectid = BTRFS_BALANCE_OBJECTID;
2576         key.type = BTRFS_BALANCE_ITEM_KEY;
2577         key.offset = 0;
2578
2579         ret = btrfs_insert_empty_item(trans, root, path, &key,
2580                                       sizeof(*item));
2581         if (ret)
2582                 goto out;
2583
2584         leaf = path->nodes[0];
2585         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2586
2587         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2588
2589         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2590         btrfs_set_balance_data(leaf, item, &disk_bargs);
2591         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2592         btrfs_set_balance_meta(leaf, item, &disk_bargs);
2593         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2594         btrfs_set_balance_sys(leaf, item, &disk_bargs);
2595
2596         btrfs_set_balance_flags(leaf, item, bctl->flags);
2597
2598         btrfs_mark_buffer_dirty(leaf);
2599 out:
2600         btrfs_free_path(path);
2601         err = btrfs_commit_transaction(trans, root);
2602         if (err && !ret)
2603                 ret = err;
2604         return ret;
2605 }
2606
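/*
 * Delete the on-disk balance item, again in its own transaction.
 */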
2607 static int del_balance_item(struct btrfs_root *root)
2608 {
2609         struct btrfs_trans_handle *trans;
2610         struct btrfs_path *path;
2611         struct btrfs_key key;
2612         int ret, err;
2613
2614         path = btrfs_alloc_path();
2615         if (!path)
2616                 return -ENOMEM;
2617
2618         trans = btrfs_start_transaction(root, 0);
2619         if (IS_ERR(trans)) {
2620                 btrfs_free_path(path);
2621                 return PTR_ERR(trans);
2622         }
2623
2624         key.objectid = BTRFS_BALANCE_OBJECTID;
2625         key.type = BTRFS_BALANCE_ITEM_KEY;
2626         key.offset = 0;
2627
2628         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2629         if (ret < 0)
2630                 goto out;
2631         if (ret > 0) {
2632                 ret = -ENOENT;
2633                 goto out;
2634         }
2635
2636         ret = btrfs_del_item(trans, root, path);
2637 out:
2638         btrfs_free_path(path);
2639         err = btrfs_commit_transaction(trans, root);
2640         if (err && !ret)
2641                 ret = err;
2642         return ret;
2643 }
2644
2645 /*
2646  * This is a heuristic used to reduce the number of chunks balanced on
2647  * resume after balance was interrupted.
2648  */
2649 static void update_balance_args(struct btrfs_balance_control *bctl)
2650 {
2651         /*
2652          * Turn on soft mode for chunk types that were being converted.
2653          */
2654         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2655                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2656         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2657                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2658         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2659                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2660
2661         /*
2662  * Turn on usage filter if it is not already used.  The idea is
2663          * that chunks that we have already balanced should be
2664          * reasonably full.  Don't do it for chunks that are being
2665          * converted - that will keep us from relocating unconverted
2666          * (albeit full) chunks.
2667          */
2668         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2669             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2670                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2671                 bctl->data.usage = 90;
2672         }
2673         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2674             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2675                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2676                 bctl->sys.usage = 90;
2677         }
2678         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2679             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2680                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2681                 bctl->meta.usage = 90;
2682         }
2683 }
2684
2685 /*
2686  * Should be called with both balance and volume mutexes held to
2687  * serialize other volume operations (add_dev/rm_dev/resize) with
2688  * restriper.  Same goes for unset_balance_control.
2689  */
2690 static void set_balance_control(struct btrfs_balance_control *bctl)
2691 {
2692         struct btrfs_fs_info *fs_info = bctl->fs_info;
2693
2694         BUG_ON(fs_info->balance_ctl);
2695
2696         spin_lock(&fs_info->balance_lock);
2697         fs_info->balance_ctl = bctl;
2698         spin_unlock(&fs_info->balance_lock);
2699 }
2700
2701 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2702 {
2703         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2704
2705         BUG_ON(!fs_info->balance_ctl);
2706
2707         spin_lock(&fs_info->balance_lock);
2708         fs_info->balance_ctl = NULL;
2709         spin_unlock(&fs_info->balance_lock);
2710
2711         kfree(bctl);
2712 }
2713
2714 /*
2715  * Balance filters.  Return 1 if chunk should be filtered out
2716  * (should not be balanced).
2717  */
2718 static int chunk_profiles_filter(u64 chunk_type,
2719                                  struct btrfs_balance_args *bargs)
2720 {
2721         chunk_type = chunk_to_extended(chunk_type) &
2722                                 BTRFS_EXTENDED_PROFILE_MASK;
2723
2724         if (bargs->profiles & chunk_type)
2725                 return 0;
2726
2727         return 1;
2728 }
2729
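/*
 * Usage filter: skip chunks whose used space is at or above
 * bargs->usage percent of the chunk size; usage == 0 matches only
 * completely empty chunks.
 */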
2730 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2731                               struct btrfs_balance_args *bargs)
2732 {
2733         struct btrfs_block_group_cache *cache;
2734         u64 chunk_used, user_thresh;
2735         int ret = 1;
2736
2737         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2738         chunk_used = btrfs_block_group_used(&cache->item);
2739
2740         if (bargs->usage == 0)
2741                 user_thresh = 1;
2742         else if (bargs->usage > 100)
2743                 user_thresh = cache->key.offset;
2744         else
2745                 user_thresh = div_factor_fine(cache->key.offset,
2746                                               bargs->usage);
2747
2748         if (chunk_used < user_thresh)
2749                 ret = 0;
2750
2751         btrfs_put_block_group(cache);
2752         return ret;
2753 }
2754
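/*
 * Devid filter: keep only chunks that have a stripe on bargs->devid.
 */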
2755 static int chunk_devid_filter(struct extent_buffer *leaf,
2756                               struct btrfs_chunk *chunk,
2757                               struct btrfs_balance_args *bargs)
2758 {
2759         struct btrfs_stripe *stripe;
2760         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2761         int i;
2762
2763         for (i = 0; i < num_stripes; i++) {
2764                 stripe = btrfs_stripe_nr(chunk, i);
2765                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2766                         return 0;
2767         }
2768
2769         return 1;
2770 }
2771
2772 /* [pstart, pend) */
2773 static int chunk_drange_filter(struct extent_buffer *leaf,
2774                                struct btrfs_chunk *chunk,
2775                                u64 chunk_offset,
2776                                struct btrfs_balance_args *bargs)
2777 {
2778         struct btrfs_stripe *stripe;
2779         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2780         u64 stripe_offset;
2781         u64 stripe_length;
2782         int factor;
2783         int i;
2784
2785         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2786                 return 0;
2787
2788         if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2789              BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
2790                 factor = num_stripes / 2;
2791         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
2792                 factor = num_stripes - 1;
2793         } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
2794                 factor = num_stripes - 2;
2795         } else {
2796                 factor = num_stripes;
2797         }
2798
2799         for (i = 0; i < num_stripes; i++) {
2800                 stripe = btrfs_stripe_nr(chunk, i);
2801                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2802                         continue;
2803
2804                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2805                 stripe_length = btrfs_chunk_length(leaf, chunk);
2806                 do_div(stripe_length, factor);
2807
2808                 if (stripe_offset < bargs->pend &&
2809                     stripe_offset + stripe_length > bargs->pstart)
2810                         return 0;
2811         }
2812
2813         return 1;
2814 }
2815
2816 /* [vstart, vend) */
2817 static int chunk_vrange_filter(struct extent_buffer *leaf,
2818                                struct btrfs_chunk *chunk,
2819                                u64 chunk_offset,
2820                                struct btrfs_balance_args *bargs)
2821 {
2822         if (chunk_offset < bargs->vend &&
2823             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2824                 /* at least part of the chunk is inside this vrange */
2825                 return 0;
2826
2827         return 1;
2828 }
2829
2830 static int chunk_soft_convert_filter(u64 chunk_type,
2831                                      struct btrfs_balance_args *bargs)
2832 {
2833         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2834                 return 0;
2835
2836         chunk_type = chunk_to_extended(chunk_type) &
2837                                 BTRFS_EXTENDED_PROFILE_MASK;
2838
2839         if (bargs->target == chunk_type)
2840                 return 1;
2841
2842         return 0;
2843 }
2844
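/*
 * Run a chunk through all configured balance filters.  Returns 1 if
 * the chunk should be relocated, 0 if it should be skipped.
 */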
2845 static int should_balance_chunk(struct btrfs_root *root,
2846                                 struct extent_buffer *leaf,
2847                                 struct btrfs_chunk *chunk, u64 chunk_offset)
2848 {
2849         struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2850         struct btrfs_balance_args *bargs = NULL;
2851         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2852
2853         /* type filter */
2854         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2855               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2856                 return 0;
2857         }
2858
2859         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2860                 bargs = &bctl->data;
2861         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2862                 bargs = &bctl->sys;
2863         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2864                 bargs = &bctl->meta;
2865
2866         /* profiles filter */
2867         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2868             chunk_profiles_filter(chunk_type, bargs)) {
2869                 return 0;
2870         }
2871
2872         /* usage filter */
2873         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2874             chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2875                 return 0;
2876         }
2877
2878         /* devid filter */
2879         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2880             chunk_devid_filter(leaf, chunk, bargs)) {
2881                 return 0;
2882         }
2883
2884         /* drange filter, makes sense only with devid filter */
2885         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2886             chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2887                 return 0;
2888         }
2889
2890         /* vrange filter */
2891         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2892             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2893                 return 0;
2894         }
2895
2896         /* soft profile changing mode */
2897         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2898             chunk_soft_convert_filter(chunk_type, bargs)) {
2899                 return 0;
2900         }
2901
2902         return 1;
2903 }
2904
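/*
 * The main balance loop: first shrink and re-grow every writeable
 * device to free a little space, then walk the chunk tree backwards
 * and relocate each chunk that passes the filters.  The walk begins
 * in counting mode (see the "counting" flag) to fill in the stat
 * counters.
 */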
2905 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2906 {
2907         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2908         struct btrfs_root *chunk_root = fs_info->chunk_root;
2909         struct btrfs_root *dev_root = fs_info->dev_root;
2910         struct list_head *devices;
2911         struct btrfs_device *device;
2912         u64 old_size;
2913         u64 size_to_free;
2914         struct btrfs_chunk *chunk;
2915         struct btrfs_path *path;
2916         struct btrfs_key key;
2917         struct btrfs_key found_key;
2918         struct btrfs_trans_handle *trans;
2919         struct extent_buffer *leaf;
2920         int slot;
2921         int ret;
2922         int enospc_errors = 0;
2923         bool counting = true;
2924
2925         /* step one, make some room on all the devices */
2926         devices = &fs_info->fs_devices->devices;
2927         list_for_each_entry(device, devices, dev_list) {
2928                 old_size = device->total_bytes;
2929                 size_to_free = div_factor(old_size, 1);
2930                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2931                 if (!device->writeable ||
2932                     device->total_bytes - device->bytes_used > size_to_free ||
2933                     device->is_tgtdev_for_dev_replace)
2934                         continue;
2935
2936                 ret = btrfs_shrink_device(device, old_size - size_to_free);
2937                 if (ret == -ENOSPC)
2938                         break;
2939                 BUG_ON(ret);
2940
2941                 trans = btrfs_start_transaction(dev_root, 0);
2942                 BUG_ON(IS_ERR(trans));
2943
2944                 ret = btrfs_grow_device(trans, device, old_size);
2945                 BUG_ON(ret);
2946
2947                 btrfs_end_transaction(trans, dev_root);
2948         }
2949
2950         /* step two, relocate all the chunks */
2951         path = btrfs_alloc_path();
2952         if (!path) {
2953                 ret = -ENOMEM;
2954                 goto error;
2955         }
2956
2957         /* zero out stat counters */
2958         spin_lock(&fs_info->balance_lock);
2959         memset(&bctl->stat, 0, sizeof(bctl->stat));
2960         spin_unlock(&fs_info->balance_lock);
2961 again:
2962         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2963         key.offset = (u64)-1;
2964         key.type = BTRFS_CHUNK_ITEM_KEY;
2965
2966         while (1) {
2967                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2968                     atomic_read(&fs_info->balance_cancel_req)) {
2969                         ret = -ECANCELED;
2970                         goto error;
2971                 }
2972
2973                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2974                 if (ret < 0)
2975                         goto error;
2976
2977                 /*
2978                  * this shouldn't happen, it means the last relocate
2979                  * failed
2980                  */
2981                 if (ret == 0)
2982                         BUG(); /* FIXME break ? */
2983
2984                 ret = btrfs_previous_item(chunk_root, path, 0,
2985                                           BTRFS_CHUNK_ITEM_KEY);
2986                 if (ret) {
2987                         ret = 0;
2988                         break;
2989                 }
2990
2991                 leaf = path->nodes[0];
2992                 slot = path->slots[0];
2993                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2994
2995                 if (found_key.objectid != key.objectid)
2996                         break;
2997
2998                 /* chunk zero is special */
2999                 if (found_key.offset == 0)
3000                         break;
3001
3002                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3003
3004                 if (!counting) {
3005                         spin_lock(&fs_info->balance_lock);
3006                         bctl->stat.considered++;
3007                         spin_unlock(&fs_info->balance_lock);
3008                 }
3009
3010                 ret = should_balance_chunk(chunk_root, leaf, chunk,
3011                                            found_key.offset);
3012                 btrfs_release_path(path);
3013                 if (!ret)
3014                         goto loop;
3015
3016                 if (counting) {
3017                         spin_lock(&fs_info->balance_lock);
3018                         bctl->stat.expected++;
3019                         spin_unlock(&fs_info->balance_lock);
3020                         goto loop;
3021                 }
3022
3023                 ret = btrfs_relocate_chunk(chunk_root,
3024                                            chunk_root->root_key.objectid,
3025                                            found_key.objectid,
3026                                            found_key.offset);
3027                 if (ret && ret != -ENOSPC)
3028                         goto error;
3029                 if (ret == -ENOSPC) {
3030                         enospc_errors++;
3031                 } else {
3032                         spin_lock(&fs_info->balance_lock);
3033                         bctl->stat.completed++;
3034                         spin_unlock(&fs_info->balance_lock);
3035                 }
3036 loop:
3037                 key.offset = found_key.offset - 1;
3038         }
3039
3040         if (counting) {
3041                 btrfs_release_path(path);
3042                 counting = false;
3043                 goto again;
3044         }
3045 error:
3046         btrfs_free_path(path);
3047         if (enospc_errors) {
3048                 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
3049                        enospc_errors);
3050                 if (!ret)
3051                         ret = -ENOSPC;
3052         }
3053
3054         return ret;
3055 }
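
/*
 * Note on the loop above: the chunk tree is walked twice.  The first
 * pass runs with counting == true and only fills bctl->stat.expected,
 * giving progress reporting a denominator; the second pass does the
 * actual relocation and updates stat.considered/stat.completed.  Both
 * passes walk backwards from offset (u64)-1 via btrfs_previous_item()
 * down to chunk zero, which also keeps chunks that the relocation
 * itself creates (at higher offsets) from being revisited.
 */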
3056
3057 /**
3058  * alloc_profile_is_valid - see if a given profile is valid and reduced
3059  * @flags: profile to validate
3060  * @extended: if true @flags is treated as an extended profile
3061  */
3062 static int alloc_profile_is_valid(u64 flags, int extended)
3063 {
3064         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3065                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3066
3067         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3068
3069         /* 1) check that all other bits are zeroed */
3070         if (flags & ~mask)
3071                 return 0;
3072
3073         /* 2) see if profile is reduced */
3074         if (flags == 0)
3075                 return !extended; /* "0" is valid for usual profiles */
3076
3077         /* true if exactly one bit set */
3078         return (flags & (flags - 1)) == 0;
3079 }
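
/*
 * Worked example for the final test above: a reduced profile has at
 * most one profile bit set.  Subtracting 1 from a one-bit value clears
 * that bit and sets every lower one, so the AND is zero:
 *
 *	flags = 0b0100:  flags & (flags - 1) = 0b0100 & 0b0011 = 0  (valid)
 *	flags = 0b0110:  flags & (flags - 1) = 0b0110 & 0b0101 = 0b0100  (invalid)
 */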
3080
3081 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3082 {
3083         /* cancel requested || normal exit path */
3084         return atomic_read(&fs_info->balance_cancel_req) ||
3085                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3086                  atomic_read(&fs_info->balance_cancel_req) == 0);
3087 }
3088
3089 static void __cancel_balance(struct btrfs_fs_info *fs_info)
3090 {
3091         int ret;
3092
3093         unset_balance_control(fs_info);
3094         ret = del_balance_item(fs_info->tree_root);
3095         if (ret)
3096                 btrfs_std_error(fs_info, ret);
3097
3098         atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3099 }
3100
3101 /*
3102  * Should be called with both balance and volume mutexes held
3103  */
3104 int btrfs_balance(struct btrfs_balance_control *bctl,
3105                   struct btrfs_ioctl_balance_args *bargs)
3106 {
3107         struct btrfs_fs_info *fs_info = bctl->fs_info;
3108         u64 allowed;
3109         int mixed = 0;
3110         int ret;
3111         u64 num_devices;
3112         unsigned seq;
3113
3114         if (btrfs_fs_closing(fs_info) ||
3115             atomic_read(&fs_info->balance_pause_req) ||
3116             atomic_read(&fs_info->balance_cancel_req)) {
3117                 ret = -EINVAL;
3118                 goto out;
3119         }
3120
3121         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
3122         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
3123                 mixed = 1;
3124
3125         /*
3126          * In case of mixed groups both data and meta should be picked,
3127          * and identical options should be given for both of them.
3128          */
3129         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
3130         if (mixed && (bctl->flags & allowed)) {
3131                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
3132                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
3133                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
3134                         printk(KERN_ERR "btrfs: with mixed groups data and "
3135                                "metadata balance options must be the same\n");
3136                         ret = -EINVAL;
3137                         goto out;
3138                 }
3139         }
3140
3141         num_devices = fs_info->fs_devices->num_devices;
3142         btrfs_dev_replace_lock(&fs_info->dev_replace);
3143         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
3144                 BUG_ON(num_devices < 1);
3145                 num_devices--;
3146         }
3147         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3148         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3149         if (num_devices == 1)
3150                 allowed |= BTRFS_BLOCK_GROUP_DUP;
3151         else if (num_devices > 1)
3152                 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3153         if (num_devices > 2)
3154                 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3155         if (num_devices > 3)
3156                 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3157                             BTRFS_BLOCK_GROUP_RAID6);
3158         if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3159             (!alloc_profile_is_valid(bctl->data.target, 1) ||
3160              (bctl->data.target & ~allowed))) {
3161                 printk(KERN_ERR "btrfs: unable to start balance with target "
3162                        "data profile %llu\n",
3163                        bctl->data.target);
3164                 ret = -EINVAL;
3165                 goto out;
3166         }
3167         if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3168             (!alloc_profile_is_valid(bctl->meta.target, 1) ||
3169              (bctl->meta.target & ~allowed))) {
3170                 printk(KERN_ERR "btrfs: unable to start balance with target "
3171                        "metadata profile %llu\n",
3172                        bctl->meta.target);
3173                 ret = -EINVAL;
3174                 goto out;
3175         }
3176         if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3177             (!alloc_profile_is_valid(bctl->sys.target, 1) ||
3178              (bctl->sys.target & ~allowed))) {
3179                 printk(KERN_ERR "btrfs: unable to start balance with target "
3180                        "system profile %llu\n",
3181                        bctl->sys.target);
3182                 ret = -EINVAL;
3183                 goto out;
3184         }
3185
3186         /* allow dup'ed data chunks only in mixed mode */
3187         if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3188             (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
3189                 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
3190                 ret = -EINVAL;
3191                 goto out;
3192         }
3193
3194         /* allow reducing meta or sys integrity only if force is set */
3195         allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3196                         BTRFS_BLOCK_GROUP_RAID10 |
3197                         BTRFS_BLOCK_GROUP_RAID5 |
3198                         BTRFS_BLOCK_GROUP_RAID6;
3199         do {
3200                 seq = read_seqbegin(&fs_info->profiles_lock);
3201
3202                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3203                      (fs_info->avail_system_alloc_bits & allowed) &&
3204                      !(bctl->sys.target & allowed)) ||
3205                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3206                      (fs_info->avail_metadata_alloc_bits & allowed) &&
3207                      !(bctl->meta.target & allowed))) {
3208                         if (bctl->flags & BTRFS_BALANCE_FORCE) {
3209                                 printk(KERN_INFO "btrfs: force reducing metadata "
3210                                        "integrity\n");
3211                         } else {
3212                                 printk(KERN_ERR "btrfs: balance will reduce metadata "
3213                                        "integrity, use force if you want this\n");
3214                                 ret = -EINVAL;
3215                                 goto out;
3216                         }
3217                 }
3218         } while (read_seqretry(&fs_info->profiles_lock, seq));
3219
3220         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3221                 int num_tolerated_disk_barrier_failures;
3222                 u64 target = bctl->sys.target;
3223
3224                 num_tolerated_disk_barrier_failures =
3225                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3226                 if (num_tolerated_disk_barrier_failures > 0 &&
3227                     (target &
3228                      (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3229                       BTRFS_AVAIL_ALLOC_BIT_SINGLE)))
3230                         num_tolerated_disk_barrier_failures = 0;
3231                 else if (num_tolerated_disk_barrier_failures > 1 &&
3232                          (target &
3233                           (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)))
3234                         num_tolerated_disk_barrier_failures = 1;
3235
3236                 fs_info->num_tolerated_disk_barrier_failures =
3237                         num_tolerated_disk_barrier_failures;
3238         }
3239
3240         ret = insert_balance_item(fs_info->tree_root, bctl);
3241         if (ret && ret != -EEXIST)
3242                 goto out;
3243
3244         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
3245                 BUG_ON(ret == -EEXIST);
3246                 set_balance_control(bctl);
3247         } else {
3248                 BUG_ON(ret != -EEXIST);
3249                 spin_lock(&fs_info->balance_lock);
3250                 update_balance_args(bctl);
3251                 spin_unlock(&fs_info->balance_lock);
3252         }
3253
3254         atomic_inc(&fs_info->balance_running);
3255         mutex_unlock(&fs_info->balance_mutex);
3256
3257         ret = __btrfs_balance(fs_info);
3258
3259         mutex_lock(&fs_info->balance_mutex);
3260         atomic_dec(&fs_info->balance_running);
3261
3262         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3263                 fs_info->num_tolerated_disk_barrier_failures =
3264                         btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
3265         }
3266
3267         if (bargs) {
3268                 memset(bargs, 0, sizeof(*bargs));
3269                 update_ioctl_balance_args(fs_info, 0, bargs);
3270         }
3271
3272         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
3273             balance_need_close(fs_info)) {
3274                 __cancel_balance(fs_info);
3275         }
3276
3277         wake_up(&fs_info->balance_wait_q);
3278
3279         return ret;
3280 out:
3281         if (bctl->flags & BTRFS_BALANCE_RESUME)
3282                 __cancel_balance(fs_info);
3283         else {
3284                 kfree(bctl);
3285                 atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
3286         }
3287         return ret;
3288 }
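
/*
 * Summary of the convert-target validation above: a target profile
 * must fit the current device count (a device busy as a dev-replace
 * target is not counted):
 *
 *	num_devices == 1:  SINGLE, DUP
 *	num_devices >= 2:  also RAID0, RAID1
 *	num_devices >= 3:  also RAID5
 *	num_devices >= 4:  also RAID10, RAID6
 */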
3289
3290 static int balance_kthread(void *data)
3291 {
3292         struct btrfs_fs_info *fs_info = data;
3293         int ret = 0;
3294
3295         mutex_lock(&fs_info->volume_mutex);
3296         mutex_lock(&fs_info->balance_mutex);
3297
3298         if (fs_info->balance_ctl) {
3299                 printk(KERN_INFO "btrfs: continuing balance\n");
3300                 ret = btrfs_balance(fs_info->balance_ctl, NULL);
3301         }
3302
3303         mutex_unlock(&fs_info->balance_mutex);
3304         mutex_unlock(&fs_info->volume_mutex);
3305
3306         return ret;
3307 }
3308
3309 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
3310 {
3311         struct task_struct *tsk;
3312
3313         spin_lock(&fs_info->balance_lock);
3314         if (!fs_info->balance_ctl) {
3315                 spin_unlock(&fs_info->balance_lock);
3316                 return 0;
3317         }
3318         spin_unlock(&fs_info->balance_lock);
3319
3320         if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
3321                 printk(KERN_INFO "btrfs: force skipping balance\n");
3322                 return 0;
3323         }
3324
3325         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
3326         return PTR_RET(tsk);
3327 }
3328
3329 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
3330 {
3331         struct btrfs_balance_control *bctl;
3332         struct btrfs_balance_item *item;
3333         struct btrfs_disk_balance_args disk_bargs;
3334         struct btrfs_path *path;
3335         struct extent_buffer *leaf;
3336         struct btrfs_key key;
3337         int ret;
3338
3339         path = btrfs_alloc_path();
3340         if (!path)
3341                 return -ENOMEM;
3342
3343         key.objectid = BTRFS_BALANCE_OBJECTID;
3344         key.type = BTRFS_BALANCE_ITEM_KEY;
3345         key.offset = 0;
3346
3347         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
3348         if (ret < 0)
3349                 goto out;
3350         if (ret > 0) { /* ret = -ENOENT; */
3351                 ret = 0;
3352                 goto out;
3353         }
3354
3355         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
3356         if (!bctl) {
3357                 ret = -ENOMEM;
3358                 goto out;
3359         }
3360
3361         leaf = path->nodes[0];
3362         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3363
3364         bctl->fs_info = fs_info;
3365         bctl->flags = btrfs_balance_flags(leaf, item);
3366         bctl->flags |= BTRFS_BALANCE_RESUME;
3367
3368         btrfs_balance_data(leaf, item, &disk_bargs);
3369         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
3370         btrfs_balance_meta(leaf, item, &disk_bargs);
3371         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
3372         btrfs_balance_sys(leaf, item, &disk_bargs);
3373         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
3374
3375         WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
3376
3377         mutex_lock(&fs_info->volume_mutex);
3378         mutex_lock(&fs_info->balance_mutex);
3379
3380         set_balance_control(bctl);
3381
3382         mutex_unlock(&fs_info->balance_mutex);
3383         mutex_unlock(&fs_info->volume_mutex);
3384 out:
3385         btrfs_free_path(path);
3386         return ret;
3387 }
3388
3389 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
3390 {
3391         int ret = 0;
3392
3393         mutex_lock(&fs_info->balance_mutex);
3394         if (!fs_info->balance_ctl) {
3395                 mutex_unlock(&fs_info->balance_mutex);
3396                 return -ENOTCONN;
3397         }
3398
3399         if (atomic_read(&fs_info->balance_running)) {
3400                 atomic_inc(&fs_info->balance_pause_req);
3401                 mutex_unlock(&fs_info->balance_mutex);
3402
3403                 wait_event(fs_info->balance_wait_q,
3404                            atomic_read(&fs_info->balance_running) == 0);
3405
3406                 mutex_lock(&fs_info->balance_mutex);
3407                 /* we are good with balance_ctl ripped off from under us */
3408                 BUG_ON(atomic_read(&fs_info->balance_running));
3409                 atomic_dec(&fs_info->balance_pause_req);
3410         } else {
3411                 ret = -ENOTCONN;
3412         }
3413
3414         mutex_unlock(&fs_info->balance_mutex);
3415         return ret;
3416 }
3417
3418 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
3419 {
3420         mutex_lock(&fs_info->balance_mutex);
3421         if (!fs_info->balance_ctl) {
3422                 mutex_unlock(&fs_info->balance_mutex);
3423                 return -ENOTCONN;
3424         }
3425
3426         atomic_inc(&fs_info->balance_cancel_req);
3427         /*
3428          * if we are running, just wait and return; the balance item
3429          * is deleted in btrfs_balance in this case
3430          */
3431         if (atomic_read(&fs_info->balance_running)) {
3432                 mutex_unlock(&fs_info->balance_mutex);
3433                 wait_event(fs_info->balance_wait_q,
3434                            atomic_read(&fs_info->balance_running) == 0);
3435                 mutex_lock(&fs_info->balance_mutex);
3436         } else {
3437                 /* __cancel_balance needs volume_mutex */
3438                 mutex_unlock(&fs_info->balance_mutex);
3439                 mutex_lock(&fs_info->volume_mutex);
3440                 mutex_lock(&fs_info->balance_mutex);
3441
3442                 if (fs_info->balance_ctl)
3443                         __cancel_balance(fs_info);
3444
3445                 mutex_unlock(&fs_info->volume_mutex);
3446         }
3447
3448         BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
3449         atomic_dec(&fs_info->balance_cancel_req);
3450         mutex_unlock(&fs_info->balance_mutex);
3451         return 0;
3452 }
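
/*
 * Pause/cancel handshake: both paths bump a request counter that
 * __btrfs_balance() polls at the top of its chunk loop, then sleep on
 * balance_wait_q until balance_running drops to zero.  The difference
 * is what remains afterwards: pause keeps the balance item on disk so
 * btrfs_recover_balance() can resume after a remount, while cancel
 * deletes the item, either here or on the exit path of btrfs_balance()
 * via balance_need_close().
 */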
3453
3454 static int btrfs_uuid_scan_kthread(void *data)
3455 {
3456         struct btrfs_fs_info *fs_info = data;
3457         struct btrfs_root *root = fs_info->tree_root;
3458         struct btrfs_key key;
3459         struct btrfs_key max_key;
3460         struct btrfs_path *path = NULL;
3461         int ret = 0;
3462         struct extent_buffer *eb;
3463         int slot;
3464         struct btrfs_root_item root_item;
3465         u32 item_size;
3466         struct btrfs_trans_handle *trans;
3467
3468         path = btrfs_alloc_path();
3469         if (!path) {
3470                 ret = -ENOMEM;
3471                 goto out;
3472         }
3473
3474         key.objectid = 0;
3475         key.type = BTRFS_ROOT_ITEM_KEY;
3476         key.offset = 0;
3477
3478         max_key.objectid = (u64)-1;
3479         max_key.type = BTRFS_ROOT_ITEM_KEY;
3480         max_key.offset = (u64)-1;
3481
3482         path->keep_locks = 1;
3483
3484         while (1) {
3485                 ret = btrfs_search_forward(root, &key, &max_key, path, 0);
3486                 if (ret) {
3487                         if (ret > 0)
3488                                 ret = 0;
3489                         break;
3490                 }
3491
3492                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
3493                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
3494                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
3495                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
3496                         goto skip;
3497
3498                 eb = path->nodes[0];
3499                 slot = path->slots[0];
3500                 item_size = btrfs_item_size_nr(eb, slot);
3501                 if (item_size < sizeof(root_item))
3502                         goto skip;
3503
3504                 trans = NULL;
3505                 read_extent_buffer(eb, &root_item,
3506                                    btrfs_item_ptr_offset(eb, slot),
3507                                    (int)sizeof(root_item));
3508                 if (btrfs_root_refs(&root_item) == 0)
3509                         goto skip;
3510                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
3511                         /*
3512                          * 1 - subvol uuid item
3513                          * 1 - received_subvol uuid item
3514                          */
3515                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
3516                         if (IS_ERR(trans)) {
3517                                 ret = PTR_ERR(trans);
3518                                 break;
3519                         }
3520                         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3521                                                   root_item.uuid,
3522                                                   BTRFS_UUID_KEY_SUBVOL,
3523                                                   key.objectid);
3524                         if (ret < 0) {
3525                                 pr_warn("btrfs: uuid_tree_add failed %d\n",
3526                                         ret);
3527                                 btrfs_end_transaction(trans,
3528                                                       fs_info->uuid_root);
3529                                 break;
3530                         }
3531                 }
3532
3533                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
3534                         if (!trans) {
3535                                 /* 1 - received_subvol uuid item */
3536                                 trans = btrfs_start_transaction(
3537                                                 fs_info->uuid_root, 1);
3538                                 if (IS_ERR(trans)) {
3539                                         ret = PTR_ERR(trans);
3540                                         break;
3541                                 }
3542                         }
3543                         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
3544                                                   root_item.received_uuid,
3545                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
3546                                                   key.objectid);
3547                         if (ret < 0) {
3548                                 pr_warn("btrfs: uuid_tree_add failed %d\n",
3549                                         ret);
3550                                 btrfs_end_transaction(trans,
3551                                                       fs_info->uuid_root);
3552                                 break;
3553                         }
3554                 }
3555
3556                 if (trans) {
3557                         ret = btrfs_end_transaction(trans, fs_info->uuid_root);
3558                         if (ret)
3559                                 break;
3560                 }
3561
3562 skip:
3563                 btrfs_release_path(path);
3564                 if (key.offset < (u64)-1) {
3565                         key.offset++;
3566                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
3567                         key.offset = 0;
3568                         key.type = BTRFS_ROOT_ITEM_KEY;
3569                 } else if (key.objectid < (u64)-1) {
3570                         key.offset = 0;
3571                         key.type = BTRFS_ROOT_ITEM_KEY;
3572                         key.objectid++;
3573                 } else {
3574                         break;
3575                 }
3576                 cond_resched();
3577         }
3578
3579 out:
3580         btrfs_free_path(path);
3581         if (ret)
3582                 pr_warn("btrfs: btrfs_uuid_scan_kthread failed %d\n", ret);
3583         else
3584                 fs_info->update_uuid_tree_gen = 1;
3585         up(&fs_info->uuid_tree_rescan_sem);
3586         return 0;
3587 }
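
/*
 * Note on the key stepping in the skip path above:
 * btrfs_search_forward() returns the next item at or after @key, so
 * the scan advances in strict (objectid, type, offset) order, e.g.
 * (illustrative objectids)
 *
 *	(256, ROOT_ITEM, 0) -> (256, ROOT_ITEM, 1) -> ... -> (257, ROOT_ITEM, 0)
 *
 * until the whole range of possible subvolume ids has been covered.
 */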
3588
3589 /*
3590  * Callback for btrfs_uuid_tree_iterate().
3591  * returns:
3592  * 0    check succeeded, the entry is not outdated.
3593  * < 0  if an error occurred.
3594  * > 0  if the check failed, which means the caller shall remove the entry.
3595  */
3596 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
3597                                        u8 *uuid, u8 type, u64 subid)
3598 {
3599         struct btrfs_key key;
3600         int ret = 0;
3601         struct btrfs_root *subvol_root;
3602
3603         if (type != BTRFS_UUID_KEY_SUBVOL &&
3604             type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
3605                 goto out;
3606
3607         key.objectid = subid;
3608         key.type = BTRFS_ROOT_ITEM_KEY;
3609         key.offset = (u64)-1;
3610         subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
3611         if (IS_ERR(subvol_root)) {
3612                 ret = PTR_ERR(subvol_root);
3613                 if (ret == -ENOENT)
3614                         ret = 1;
3615                 goto out;
3616         }
3617
3618         switch (type) {
3619         case BTRFS_UUID_KEY_SUBVOL:
3620                 if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
3621                         ret = 1;
3622                 break;
3623         case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
3624                 if (memcmp(uuid, subvol_root->root_item.received_uuid,
3625                            BTRFS_UUID_SIZE))
3626                         ret = 1;
3627                 break;
3628         }
3629
3630 out:
3631         return ret;
3632 }
3633
3634 static int btrfs_uuid_rescan_kthread(void *data)
3635 {
3636         struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
3637         int ret;
3638
3639         /*
3640          * 1st step is to iterate through the existing UUID tree and
3641          * to delete all entries that contain outdated data.
3642          * 2nd step is to add all missing entries to the UUID tree.
3643          */
3644         ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
3645         if (ret < 0) {
3646                 pr_warn("btrfs: iterating uuid_tree failed %d\n", ret);
3647                 up(&fs_info->uuid_tree_rescan_sem);
3648                 return ret;
3649         }
3650         return btrfs_uuid_scan_kthread(data);
3651 }
3652
3653 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
3654 {
3655         struct btrfs_trans_handle *trans;
3656         struct btrfs_root *tree_root = fs_info->tree_root;
3657         struct btrfs_root *uuid_root;
3658         struct task_struct *task;
3659         int ret;
3660
3661         /*
3662          * 1 - root node
3663          * 1 - root item
3664          */
3665         trans = btrfs_start_transaction(tree_root, 2);
3666         if (IS_ERR(trans))
3667                 return PTR_ERR(trans);
3668
3669         uuid_root = btrfs_create_tree(trans, fs_info,
3670                                       BTRFS_UUID_TREE_OBJECTID);
3671         if (IS_ERR(uuid_root)) {
3672                 btrfs_abort_transaction(trans, tree_root, PTR_ERR(uuid_root));
3673                 btrfs_end_transaction(trans, tree_root);
3674                 return PTR_ERR(uuid_root);
3675         }
3676
3677         fs_info->uuid_root = uuid_root;
3678
3679         ret = btrfs_commit_transaction(trans, tree_root);
3680         if (ret)
3681                 return ret;
3682
3683         down(&fs_info->uuid_tree_rescan_sem);
3684         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
3685         if (IS_ERR(task)) {
3686                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3687                 pr_warn("btrfs: failed to start uuid_scan task\n");
3688                 up(&fs_info->uuid_tree_rescan_sem);
3689                 return PTR_ERR(task);
3690         }
3691
3692         return 0;
3693 }
3694
3695 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
3696 {
3697         struct task_struct *task;
3698
3699         down(&fs_info->uuid_tree_rescan_sem);
3700         task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
3701         if (IS_ERR(task)) {
3702                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
3703                 pr_warn("btrfs: failed to start uuid_rescan task\n");
3704                 up(&fs_info->uuid_tree_rescan_sem);
3705                 return PTR_ERR(task);
3706         }
3707
3708         return 0;
3709 }
3710
3711 /*
3712  * shrinking a device means finding all of the device extents past
3713  * the new size, and then following the back refs to the chunks.
3714  * The chunk relocation code actually frees the device extents.
3715  */
3716 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
3717 {
3718         struct btrfs_trans_handle *trans;
3719         struct btrfs_root *root = device->dev_root;
3720         struct btrfs_dev_extent *dev_extent = NULL;
3721         struct btrfs_path *path;
3722         u64 length;
3723         u64 chunk_tree;
3724         u64 chunk_objectid;
3725         u64 chunk_offset;
3726         int ret;
3727         int slot;
3728         int failed = 0;
3729         bool retried = false;
3730         struct extent_buffer *l;
3731         struct btrfs_key key;
3732         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3733         u64 old_total = btrfs_super_total_bytes(super_copy);
3734         u64 old_size = device->total_bytes;
3735         u64 diff = device->total_bytes - new_size;
3736
3737         if (device->is_tgtdev_for_dev_replace)
3738                 return -EINVAL;
3739
3740         path = btrfs_alloc_path();
3741         if (!path)
3742                 return -ENOMEM;
3743
3744         path->reada = 2;
3745
3746         lock_chunks(root);
3747
3748         device->total_bytes = new_size;
3749         if (device->writeable) {
3750                 device->fs_devices->total_rw_bytes -= diff;
3751                 spin_lock(&root->fs_info->free_chunk_lock);
3752                 root->fs_info->free_chunk_space -= diff;
3753                 spin_unlock(&root->fs_info->free_chunk_lock);
3754         }
3755         unlock_chunks(root);
3756
3757 again:
3758         key.objectid = device->devid;
3759         key.offset = (u64)-1;
3760         key.type = BTRFS_DEV_EXTENT_KEY;
3761
3762         do {
3763                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3764                 if (ret < 0)
3765                         goto done;
3766
3767                 ret = btrfs_previous_item(root, path, 0, key.type);
3768                 if (ret < 0)
3769                         goto done;
3770                 if (ret) {
3771                         ret = 0;
3772                         btrfs_release_path(path);
3773                         break;
3774                 }
3775
3776                 l = path->nodes[0];
3777                 slot = path->slots[0];
3778                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3779
3780                 if (key.objectid != device->devid) {
3781                         btrfs_release_path(path);
3782                         break;
3783                 }
3784
3785                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3786                 length = btrfs_dev_extent_length(l, dev_extent);
3787
3788                 if (key.offset + length <= new_size) {
3789                         btrfs_release_path(path);
3790                         break;
3791                 }
3792
3793                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3794                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3795                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3796                 btrfs_release_path(path);
3797
3798                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3799                                            chunk_offset);
3800                 if (ret && ret != -ENOSPC)
3801                         goto done;
3802                 if (ret == -ENOSPC)
3803                         failed++;
3804         } while (key.offset-- > 0);
3805
3806         if (failed && !retried) {
3807                 failed = 0;
3808                 retried = true;
3809                 goto again;
3810         } else if (failed && retried) {
3811                 ret = -ENOSPC;
3812                 lock_chunks(root);
3813
3814                 device->total_bytes = old_size;
3815                 if (device->writeable)
3816                         device->fs_devices->total_rw_bytes += diff;
3817                 spin_lock(&root->fs_info->free_chunk_lock);
3818                 root->fs_info->free_chunk_space += diff;
3819                 spin_unlock(&root->fs_info->free_chunk_lock);
3820                 unlock_chunks(root);
3821                 goto done;
3822         }
3823
3824         /* Shrinking succeeded, else we would be at "done". */
3825         trans = btrfs_start_transaction(root, 0);
3826         if (IS_ERR(trans)) {
3827                 ret = PTR_ERR(trans);
3828                 goto done;
3829         }
3830
3831         lock_chunks(root);
3832
3833         device->disk_total_bytes = new_size;
3834         /* Now btrfs_update_device() will change the on-disk size. */
3835         ret = btrfs_update_device(trans, device);
3836         if (ret) {
3837                 unlock_chunks(root);
3838                 btrfs_end_transaction(trans, root);
3839                 goto done;
3840         }
3841         WARN_ON(diff > old_total);
3842         btrfs_set_super_total_bytes(super_copy, old_total - diff);
3843         unlock_chunks(root);
3844         btrfs_end_transaction(trans, root);
3845 done:
3846         btrfs_free_path(path);
3847         return ret;
3848 }
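
/*
 * Note on the retry above: relocating a chunk needs room for the new
 * copy, and with the device already clamped to new_size the first pass
 * over the tail extents can hit -ENOSPC.  Each successful relocation
 * frees space, so a second full pass can succeed where the first did
 * not; only after that does the function give up and restore the old
 * size.
 */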
3849
3850 static int btrfs_add_system_chunk(struct btrfs_root *root,
3851                            struct btrfs_key *key,
3852                            struct btrfs_chunk *chunk, int item_size)
3853 {
3854         struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3855         struct btrfs_disk_key disk_key;
3856         u32 array_size;
3857         u8 *ptr;
3858
3859         array_size = btrfs_super_sys_array_size(super_copy);
3860         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3861                 return -EFBIG;
3862
3863         ptr = super_copy->sys_chunk_array + array_size;
3864         btrfs_cpu_key_to_disk(&disk_key, key);
3865         memcpy(ptr, &disk_key, sizeof(disk_key));
3866         ptr += sizeof(disk_key);
3867         memcpy(ptr, chunk, item_size);
3868         item_size += sizeof(disk_key);
3869         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3870         return 0;
3871 }
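
/*
 * Layout sketch of super_copy->sys_chunk_array as appended to above:
 * a packed sequence of (disk key, chunk item including stripes) pairs,
 *
 *	[btrfs_disk_key 0][btrfs_chunk 0 ...][btrfs_disk_key 1][btrfs_chunk 1 ...]
 *
 * capped at BTRFS_SYSTEM_CHUNK_ARRAY_SIZE bytes, which is why the
 * function fails with -EFBIG rather than growing the superblock.
 */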
3872
3873 /*
3874  * sort the devices in descending order by max_avail, total_avail
3875  */
3876 static int btrfs_cmp_device_info(const void *a, const void *b)
3877 {
3878         const struct btrfs_device_info *di_a = a;
3879         const struct btrfs_device_info *di_b = b;
3880
3881         if (di_a->max_avail > di_b->max_avail)
3882                 return -1;
3883         if (di_a->max_avail < di_b->max_avail)
3884                 return 1;
3885         if (di_a->total_avail > di_b->total_avail)
3886                 return -1;
3887         if (di_a->total_avail < di_b->total_avail)
3888                 return 1;
3889         return 0;
3890 }
3891
3892 static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
3893         [BTRFS_RAID_RAID10] = {
3894                 .sub_stripes    = 2,
3895                 .dev_stripes    = 1,
3896                 .devs_max       = 0,    /* 0 == as many as possible */
3897                 .devs_min       = 4,
3898                 .devs_increment = 2,
3899                 .ncopies        = 2,
3900         },
3901         [BTRFS_RAID_RAID1] = {
3902                 .sub_stripes    = 1,
3903                 .dev_stripes    = 1,
3904                 .devs_max       = 2,
3905                 .devs_min       = 2,
3906                 .devs_increment = 2,
3907                 .ncopies        = 2,
3908         },
3909         [BTRFS_RAID_DUP] = {
3910                 .sub_stripes    = 1,
3911                 .dev_stripes    = 2,
3912                 .devs_max       = 1,
3913                 .devs_min       = 1,
3914                 .devs_increment = 1,
3915                 .ncopies        = 2,
3916         },
3917         [BTRFS_RAID_RAID0] = {
3918                 .sub_stripes    = 1,
3919                 .dev_stripes    = 1,
3920                 .devs_max       = 0,
3921                 .devs_min       = 2,
3922                 .devs_increment = 1,
3923                 .ncopies        = 1,
3924         },
3925         [BTRFS_RAID_SINGLE] = {
3926                 .sub_stripes    = 1,
3927                 .dev_stripes    = 1,
3928                 .devs_max       = 1,
3929                 .devs_min       = 1,
3930                 .devs_increment = 1,
3931                 .ncopies        = 1,
3932         },
3933         [BTRFS_RAID_RAID5] = {
3934                 .sub_stripes    = 1,
3935                 .dev_stripes    = 1,
3936                 .devs_max       = 0,
3937                 .devs_min       = 2,
3938                 .devs_increment = 1,
3939                 .ncopies        = 2,
3940         },
3941         [BTRFS_RAID_RAID6] = {
3942                 .sub_stripes    = 1,
3943                 .dev_stripes    = 1,
3944                 .devs_max       = 0,
3945                 .devs_min       = 3,
3946                 .devs_increment = 1,
3947                 .ncopies        = 3,
3948         },
3949 };
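
/*
 * Worked example for the table above: RAID10 across 7 writable
 * devices.  devs_increment == 2 rounds ndevs down to 6 and
 * devs_max == 0 leaves it uncapped, so num_stripes = 6 * dev_stripes
 * = 6, laid out as 3 mirrored pairs (sub_stripes == 2).  With
 * ncopies == 2 only num_stripes / ncopies == 3 stripes count towards
 * the block group size in __btrfs_alloc_chunk() below.
 */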
3950
3951 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
3952 {
3953         /* TODO allow them to set a preferred stripe size */
3954         return 64 * 1024;
3955 }
3956
3957 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
3958 {
3959         if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
3960                 return;
3961
3962         btrfs_set_fs_incompat(info, RAID56);
3963 }
3964
3965 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3966                                struct btrfs_root *extent_root, u64 start,
3967                                u64 type)
3968 {
3969         struct btrfs_fs_info *info = extent_root->fs_info;
3970         struct btrfs_fs_devices *fs_devices = info->fs_devices;
3971         struct list_head *cur;
3972         struct map_lookup *map = NULL;
3973         struct extent_map_tree *em_tree;
3974         struct extent_map *em;
3975         struct btrfs_device_info *devices_info = NULL;
3976         u64 total_avail;
3977         int num_stripes;        /* total number of stripes to allocate */
3978         int data_stripes;       /* number of stripes that count for
3979                                    block group size */
3980         int sub_stripes;        /* sub_stripes info for map */
3981         int dev_stripes;        /* stripes per dev */
3982         int devs_max;           /* max devs to use */
3983         int devs_min;           /* min devs needed */
3984         int devs_increment;     /* ndevs has to be a multiple of this */
3985         int ncopies;            /* how many copies the data has */
3986         int ret;
3987         u64 max_stripe_size;
3988         u64 max_chunk_size;
3989         u64 stripe_size;
3990         u64 num_bytes;
3991         u64 raid_stripe_len = BTRFS_STRIPE_LEN;
3992         int ndevs;
3993         int i;
3994         int j;
3995         int index;
3996
3997         BUG_ON(!alloc_profile_is_valid(type, 0));
3998
3999         if (list_empty(&fs_devices->alloc_list))
4000                 return -ENOSPC;
4001
4002         index = __get_raid_index(type);
4003
4004         sub_stripes = btrfs_raid_array[index].sub_stripes;
4005         dev_stripes = btrfs_raid_array[index].dev_stripes;
4006         devs_max = btrfs_raid_array[index].devs_max;
4007         devs_min = btrfs_raid_array[index].devs_min;
4008         devs_increment = btrfs_raid_array[index].devs_increment;
4009         ncopies = btrfs_raid_array[index].ncopies;
4010
4011         if (type & BTRFS_BLOCK_GROUP_DATA) {
4012                 max_stripe_size = 1024 * 1024 * 1024;
4013                 max_chunk_size = 10 * max_stripe_size;
4014         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4015                 /* for larger filesystems, use larger metadata chunks */
4016                 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
4017                         max_stripe_size = 1024 * 1024 * 1024;
4018                 else
4019                         max_stripe_size = 256 * 1024 * 1024;
4020                 max_chunk_size = max_stripe_size;
4021         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4022                 max_stripe_size = 32 * 1024 * 1024;
4023                 max_chunk_size = 2 * max_stripe_size;
4024         } else {
4025                 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
4026                        type);
4027                 BUG_ON(1);
4028         }
4029
4030         /* we don't want a chunk larger than 10% of writeable space */
4031         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4032                              max_chunk_size);
4033
4034         devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
4035                                GFP_NOFS);
4036         if (!devices_info)
4037                 return -ENOMEM;
4038
4039         cur = fs_devices->alloc_list.next;
4040
4041         /*
4042          * in the first pass through the devices list, we gather information
4043          * about the available holes on each device.
4044          */
4045         ndevs = 0;
4046         while (cur != &fs_devices->alloc_list) {
4047                 struct btrfs_device *device;
4048                 u64 max_avail;
4049                 u64 dev_offset;
4050
4051                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
4052
4053                 cur = cur->next;
4054
4055                 if (!device->writeable) {
4056                         WARN(1, KERN_ERR
4057                                "btrfs: read-only device in alloc_list\n");
4058                         continue;
4059                 }
4060
4061                 if (!device->in_fs_metadata ||
4062                     device->is_tgtdev_for_dev_replace)
4063                         continue;
4064
4065                 if (device->total_bytes > device->bytes_used)
4066                         total_avail = device->total_bytes - device->bytes_used;
4067                 else
4068                         total_avail = 0;
4069
4070                 /* If there is no space on this device, skip it. */
4071                 if (total_avail == 0)
4072                         continue;
4073
4074                 ret = find_free_dev_extent(trans, device,
4075                                            max_stripe_size * dev_stripes,
4076                                            &dev_offset, &max_avail);
4077                 if (ret && ret != -ENOSPC)
4078                         goto error;
4079
4080                 if (ret == 0)
4081                         max_avail = max_stripe_size * dev_stripes;
4082
4083                 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
4084                         continue;
4085
4086                 if (ndevs == fs_devices->rw_devices) {
4087                         WARN(1, "%s: found more than %llu devices\n",
4088                              __func__, fs_devices->rw_devices);
4089                         break;
4090                 }
4091                 devices_info[ndevs].dev_offset = dev_offset;
4092                 devices_info[ndevs].max_avail = max_avail;
4093                 devices_info[ndevs].total_avail = total_avail;
4094                 devices_info[ndevs].dev = device;
4095                 ++ndevs;
4096         }
4097
4098         /*
4099          * now sort the devices by hole size / available space
4100          */
4101         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4102              btrfs_cmp_device_info, NULL);
4103
4104         /* round down to number of usable stripes */
4105         ndevs -= ndevs % devs_increment;
4106
4107         if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
4108                 ret = -ENOSPC;
4109                 goto error;
4110         }
4111
4112         if (devs_max && ndevs > devs_max)
4113                 ndevs = devs_max;
4114         /*
4115          * the primary goal is to maximize the number of stripes, so use as many
4116          * devices as possible, even if the stripes are not maximum sized.
4117          */
4118         stripe_size = devices_info[ndevs-1].max_avail;
4119         num_stripes = ndevs * dev_stripes;
4120
4121         /*
4122          * this will have to be fixed for RAID1 and RAID10 over
4123          * more drives
4124          */
4125         data_stripes = num_stripes / ncopies;
4126
4127         if (type & BTRFS_BLOCK_GROUP_RAID5) {
4128                 raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
4129                                  btrfs_super_stripesize(info->super_copy));
4130                 data_stripes = num_stripes - 1;
4131         }
4132         if (type & BTRFS_BLOCK_GROUP_RAID6) {
4133                 raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
4134                                  btrfs_super_stripesize(info->super_copy));
4135                 data_stripes = num_stripes - 2;
4136         }
4137
4138         /*
4139          * Use the number of data stripes to figure out how big this chunk
4140          * is really going to be in terms of logical address space,
4141          * and compare that answer with the max chunk size
4142          */
4143         if (stripe_size * data_stripes > max_chunk_size) {
4144                 u64 mask = (1ULL << 24) - 1;
4145                 stripe_size = max_chunk_size;
4146                 do_div(stripe_size, data_stripes);
4147
4148                 /* bump the answer up to a 16MB boundary */
4149                 stripe_size = (stripe_size + mask) & ~mask;
4150
4151                 /* but don't go higher than the limits we found
4152                  * while searching for free extents
4153                  */
4154                 if (stripe_size > devices_info[ndevs-1].max_avail)
4155                         stripe_size = devices_info[ndevs-1].max_avail;
4156         }
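
        /*
         * Worked example for the clamp above (assuming a data chunk):
         * max_chunk_size is 10 GiB, and with data_stripes == 3 the
         * division yields ~3413 MiB per stripe; adding mask and
         * clearing the low 24 bits rounds that up to the next 16 MiB
         * multiple, 3424 MiB, which is then re-checked against the
         * smallest device's max_avail.
         */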
4157
4158         do_div(stripe_size, dev_stripes);
4159
4160         /* align to BTRFS_STRIPE_LEN */
4161         do_div(stripe_size, raid_stripe_len);
4162         stripe_size *= raid_stripe_len;
4163
4164         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4165         if (!map) {
4166                 ret = -ENOMEM;
4167                 goto error;
4168         }
4169         map->num_stripes = num_stripes;
4170
4171         for (i = 0; i < ndevs; ++i) {
4172                 for (j = 0; j < dev_stripes; ++j) {
4173                         int s = i * dev_stripes + j;
4174                         map->stripes[s].dev = devices_info[i].dev;
4175                         map->stripes[s].physical = devices_info[i].dev_offset +
4176                                                    j * stripe_size;
4177                 }
4178         }
4179         map->sector_size = extent_root->sectorsize;
4180         map->stripe_len = raid_stripe_len;
4181         map->io_align = raid_stripe_len;
4182         map->io_width = raid_stripe_len;
4183         map->type = type;
4184         map->sub_stripes = sub_stripes;
4185
4186         num_bytes = stripe_size * data_stripes;
4187
4188         trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
4189
4190         em = alloc_extent_map();
4191         if (!em) {
4192                 ret = -ENOMEM;
4193                 goto error;
4194         }
4195         em->bdev = (struct block_device *)map;
4196         em->start = start;
4197         em->len = num_bytes;
4198         em->block_start = 0;
4199         em->block_len = em->len;
4200         em->orig_block_len = stripe_size;
4201
4202         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4203         write_lock(&em_tree->lock);
4204         ret = add_extent_mapping(em_tree, em, 0);
4205         if (!ret) {
4206                 list_add_tail(&em->list, &trans->transaction->pending_chunks);
4207                 atomic_inc(&em->refs);
4208         }
4209         write_unlock(&em_tree->lock);
4210         if (ret) {
4211                 free_extent_map(em);
4212                 goto error;
4213         }
4214
4215         ret = btrfs_make_block_group(trans, extent_root, 0, type,
4216                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4217                                      start, num_bytes);
4218         if (ret)
4219                 goto error_del_extent;
4220
4221         free_extent_map(em);
4222         check_raid56_incompat_flag(extent_root->fs_info, type);
4223
4224         kfree(devices_info);
4225         return 0;
4226
4227 error_del_extent:
4228         write_lock(&em_tree->lock);
4229         remove_extent_mapping(em_tree, em);
4230         write_unlock(&em_tree->lock);
4231
4232         /* One for our allocation */
4233         free_extent_map(em);
4234         /* One for the tree reference */
4235         free_extent_map(em);
4236 error:
4237         kfree(map);
4238         kfree(devices_info);
4239         return ret;
4240 }
4241
4242 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
4243                                 struct btrfs_root *extent_root,
4244                                 u64 chunk_offset, u64 chunk_size)
4245 {
4246         struct btrfs_key key;
4247         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
4248         struct btrfs_device *device;
4249         struct btrfs_chunk *chunk;
4250         struct btrfs_stripe *stripe;
4251         struct extent_map_tree *em_tree;
4252         struct extent_map *em;
4253         struct map_lookup *map;
4254         size_t item_size;
4255         u64 dev_offset;
4256         u64 stripe_size;
4257         int i = 0;
4258         int ret;
4259
4260         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
4261         read_lock(&em_tree->lock);
4262         em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
4263         read_unlock(&em_tree->lock);
4264
4265         if (!em) {
4266                 btrfs_crit(extent_root->fs_info, "unable to find logical "
4267                            "%Lu len %Lu", chunk_offset, chunk_size);
4268                 return -EINVAL;
4269         }
4270
4271         if (em->start != chunk_offset || em->len != chunk_size) {
4272                 btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
4273                           " %Lu-%Lu, found %Lu-%Lu", chunk_offset,
4274                           chunk_size, em->start, em->len);
4275                 free_extent_map(em);
4276                 return -EINVAL;
4277         }
4278
4279         map = (struct map_lookup *)em->bdev;
4280         item_size = btrfs_chunk_item_size(map->num_stripes);
4281         stripe_size = em->orig_block_len;
4282
4283         chunk = kzalloc(item_size, GFP_NOFS);
4284         if (!chunk) {
4285                 ret = -ENOMEM;
4286                 goto out;
4287         }
4288
4289         for (i = 0; i < map->num_stripes; i++) {
4290                 device = map->stripes[i].dev;
4291                 dev_offset = map->stripes[i].physical;
4292
4293                 device->bytes_used += stripe_size;
4294                 ret = btrfs_update_device(trans, device);
4295                 if (ret)
4296                         goto out;
4297                 ret = btrfs_alloc_dev_extent(trans, device,
4298                                              chunk_root->root_key.objectid,
4299                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID,
4300                                              chunk_offset, dev_offset,
4301                                              stripe_size);
4302                 if (ret)
4303                         goto out;
4304         }
4305
4306         spin_lock(&extent_root->fs_info->free_chunk_lock);
4307         extent_root->fs_info->free_chunk_space -= (stripe_size *
4308                                                    map->num_stripes);
4309         spin_unlock(&extent_root->fs_info->free_chunk_lock);
4310
4311         stripe = &chunk->stripe;
4312         for (i = 0; i < map->num_stripes; i++) {
4313                 device = map->stripes[i].dev;
4314                 dev_offset = map->stripes[i].physical;
4315
4316                 btrfs_set_stack_stripe_devid(stripe, device->devid);
4317                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
4318                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
4319                 stripe++;
4320         }
4321
4322         btrfs_set_stack_chunk_length(chunk, chunk_size);
4323         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
4324         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
4325         btrfs_set_stack_chunk_type(chunk, map->type);
4326         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
4327         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
4328         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
4329         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
4330         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
4331
4332         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
4333         key.type = BTRFS_CHUNK_ITEM_KEY;
4334         key.offset = chunk_offset;
4335
4336         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
4337         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
4338                 /*
4339                  * TODO: Cleanup of inserted chunk root in case of
4340                  * failure.
4341                  */
4342                 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
4343                                              item_size);
4344         }
4345
4346 out:
4347         kfree(chunk);
4348         free_extent_map(em);
4349         return ret;
4350 }
4351
4352 /*
4353  * Chunk allocation falls into two parts. The first part does the work
4354  * that makes the newly allocated chunk usable, but does not perform any
4355  * operation that modifies the chunk tree. The second part does the work
4356  * that requires modifying the chunk tree. This division is important for the
4357  * bootstrap process of adding storage to a seed btrfs.
4358  */
4359 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
4360                       struct btrfs_root *extent_root, u64 type)
4361 {
4362         u64 chunk_offset;
4363
4364         chunk_offset = find_next_chunk(extent_root->fs_info);
4365         return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
4366 }
4367
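     /*
      * Bootstrap a freshly writable filesystem (e.g. when sprouting from a
      * seed): allocate an initial chunk and an initial system chunk, then
      * record the new device in the chunk tree.
      */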
4368 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
4369                                          struct btrfs_root *root,
4370                                          struct btrfs_device *device)
4371 {
4372         u64 chunk_offset;
4373         u64 sys_chunk_offset;
4374         u64 alloc_profile;
4375         struct btrfs_fs_info *fs_info = root->fs_info;
4376         struct btrfs_root *extent_root = fs_info->extent_root;
4377         int ret;
4378
4379         chunk_offset = find_next_chunk(fs_info);
4380         alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
4381         ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
4382                                   alloc_profile);
4383         if (ret)
4384                 return ret;
4385
4386         sys_chunk_offset = find_next_chunk(fs_info);
4387         alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
4388         ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
4389                                   alloc_profile);
4390         if (ret) {
4391                 btrfs_abort_transaction(trans, root, ret);
4392                 goto out;
4393         }
4394
4395         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
4396         if (ret)
4397                 btrfs_abort_transaction(trans, root, ret);
4398 out:
4399         return ret;
4400 }
4401
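     /*
      * A chunk is treated as read-only when any of its stripes sits on a
      * device that is not writeable; mounting with -o degraded skips the
      * check, and a chunk with no mapping also counts as read-only.
      */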
4402 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
4403 {
4404         struct extent_map *em;
4405         struct map_lookup *map;
4406         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4407         int readonly = 0;
4408         int i;
4409
4410         read_lock(&map_tree->map_tree.lock);
4411         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
4412         read_unlock(&map_tree->map_tree.lock);
4413         if (!em)
4414                 return 1;
4415
4416         if (btrfs_test_opt(root, DEGRADED)) {
4417                 free_extent_map(em);
4418                 return 0;
4419         }
4420
4421         map = (struct map_lookup *)em->bdev;
4422         for (i = 0; i < map->num_stripes; i++) {
4423                 if (!map->stripes[i].dev->writeable) {
4424                         readonly = 1;
4425                         break;
4426                 }
4427         }
4428         free_extent_map(em);
4429         return readonly;
4430 }
4431
4432 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
4433 {
4434         extent_map_tree_init(&tree->map_tree);
4435 }
4436
4437 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
4438 {
4439         struct extent_map *em;
4440
4441         while (1) {
4442                 write_lock(&tree->map_tree.lock);
4443                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
4444                 if (em)
4445                         remove_extent_mapping(&tree->map_tree, em);
4446                 write_unlock(&tree->map_tree.lock);
4447                 if (!em)
4448                         break;
4449                 kfree(em->bdev);
4450                 /* once for us */
4451                 free_extent_map(em);
4452                 /* once for the tree */
4453                 free_extent_map(em);
4454         }
4455 }
4456
4457 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
4458 {
4459         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4460         struct extent_map *em;
4461         struct map_lookup *map;
4462         struct extent_map_tree *em_tree = &map_tree->map_tree;
4463         int ret;
4464
4465         read_lock(&em_tree->lock);
4466         em = lookup_extent_mapping(em_tree, logical, len);
4467         read_unlock(&em_tree->lock);
4468
4469         /*
4470          * We could return errors for these cases, but that could get ugly and
4471          * we'd probably do the same thing which is just not do anything else
4472          * and exit, so return 1 so the callers don't try to use other copies.
4473          */
4474         if (!em) {
4475                 btrfs_crit(fs_info, "No mapping for %Lu-%Lu", logical,
4476                             logical+len);
4477                 return 1;
4478         }
4479
4480         if (em->start > logical || em->start + em->len < logical) {
4481                 btrfs_crit(fs_info, "Invalid mapping for %Lu-%Lu, got "
4482                             "%Lu-%Lu", logical, logical+len, em->start,
4483                             em->start + em->len);
                     free_extent_map(em); /* drop the ref taken by lookup_extent_mapping */
4484                 return 1;
4485         }
4486
4487         map = (struct map_lookup *)em->bdev;
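             /*
              * RAID5 data can also be rebuilt from parity, so it counts as
              * 2 "copies"; RAID6 tolerates two failures, so 3.  An ongoing
              * dev-replace adds one more readable location below.
              */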
4488         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
4489                 ret = map->num_stripes;
4490         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
4491                 ret = map->sub_stripes;
4492         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
4493                 ret = 2;
4494         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4495                 ret = 3;
4496         else
4497                 ret = 1;
4498         free_extent_map(em);
4499
4500         btrfs_dev_replace_lock(&fs_info->dev_replace);
4501         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))
4502                 ret++;
4503         btrfs_dev_replace_unlock(&fs_info->dev_replace);
4504
4505         return ret;
4506 }
4507
4508 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
4509                                     struct btrfs_mapping_tree *map_tree,
4510                                     u64 logical)
4511 {
4512         struct extent_map *em;
4513         struct map_lookup *map;
4514         struct extent_map_tree *em_tree = &map_tree->map_tree;
4515         unsigned long len = root->sectorsize;
4516
4517         read_lock(&em_tree->lock);
4518         em = lookup_extent_mapping(em_tree, logical, len);
4519         read_unlock(&em_tree->lock);
4520         BUG_ON(!em);
4521
4522         BUG_ON(em->start > logical || em->start + em->len < logical);
4523         map = (struct map_lookup *)em->bdev;
4524         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4525                          BTRFS_BLOCK_GROUP_RAID6)) {
4526                 len = map->stripe_len * nr_data_stripes(map);
4527         }
4528         free_extent_map(em);
4529         return len;
4530 }
4531
4532 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
4533                            u64 logical, u64 len, int mirror_num)
4534 {
4535         struct extent_map *em;
4536         struct map_lookup *map;
4537         struct extent_map_tree *em_tree = &map_tree->map_tree;
4538         int ret = 0;
4539
4540         read_lock(&em_tree->lock);
4541         em = lookup_extent_mapping(em_tree, logical, len);
4542         read_unlock(&em_tree->lock);
4543         BUG_ON(!em);
4544
4545         BUG_ON(em->start > logical || em->start + em->len < logical);
4546         map = (struct map_lookup *)em->bdev;
4547         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4548                          BTRFS_BLOCK_GROUP_RAID6))
4549                 ret = 1;
4550         free_extent_map(em);
4551         return ret;
4552 }
4553
4554 static int find_live_mirror(struct btrfs_fs_info *fs_info,
4555                             struct map_lookup *map, int first, int num,
4556                             int optimal, int dev_replace_is_ongoing)
4557 {
4558         int i;
4559         int tolerance;
4560         struct btrfs_device *srcdev;
4561
4562         if (dev_replace_is_ongoing &&
4563             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
4564              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
4565                 srcdev = fs_info->dev_replace.srcdev;
4566         else
4567                 srcdev = NULL;
4568
4569         /*
4570          * try to avoid the drive that is the source drive for a
4571          * dev-replace procedure; only choose it if no other non-missing
4572          * mirror is available
4573          */
4574         for (tolerance = 0; tolerance < 2; tolerance++) {
4575                 if (map->stripes[optimal].dev->bdev &&
4576                     (tolerance || map->stripes[optimal].dev != srcdev))
4577                         return optimal;
4578                 for (i = first; i < first + num; i++) {
4579                         if (map->stripes[i].dev->bdev &&
4580                             (tolerance || map->stripes[i].dev != srcdev))
4581                                 return i;
4582                 }
4583         }
4584
4585         /* we couldn't find one that doesn't fail.  Just return something
4586          * and the io error handling code will clean up eventually
4587          */
4588         return optimal;
4589 }
4590
4591 static inline int parity_smaller(u64 a, u64 b)
4592 {
4593         return a > b;
4594 }
4595
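     /*
      * In a raid_map, data stripes carry real logical addresses while the
      * parity slots hold the RAID5_P_STRIPE/RAID6_Q_STRIPE sentinels, which
      * compare greater than any logical address, so an ascending sort
      * pushes the parity and syndrome stripes to the end.
      */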
4596 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
4597 static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4598 {
4599         struct btrfs_bio_stripe s;
4600         int i;
4601         u64 l;
4602         int again = 1;
4603
4604         while (again) {
4605                 again = 0;
4606                 for (i = 0; i < bbio->num_stripes - 1; i++) {
4607                         if (parity_smaller(raid_map[i], raid_map[i+1])) {
4608                                 s = bbio->stripes[i];
4609                                 l = raid_map[i];
4610                                 bbio->stripes[i] = bbio->stripes[i+1];
4611                                 raid_map[i] = raid_map[i+1];
4612                                 bbio->stripes[i+1] = s;
4613                                 raid_map[i+1] = l;
4614                                 again = 1;
4615                         }
4616                 }
4617         }
4618 }
4619
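     /*
      * Map a logical range onto physical stripes.  *length is trimmed to
      * what one mapping can cover, and on success *bbio_ret describes the
      * stripes the given rw operation must touch.  mirror_num selects a
      * specific copy (0 lets us pick any live mirror); for RAID[56] writes
      * and recovery, *raid_map_ret is filled with the logical layout of
      * the full stripe, parity slots marked by the RAID5_P_STRIPE and
      * RAID6_Q_STRIPE sentinels.
      */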
4620 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4621                              u64 logical, u64 *length,
4622                              struct btrfs_bio **bbio_ret,
4623                              int mirror_num, u64 **raid_map_ret)
4624 {
4625         struct extent_map *em;
4626         struct map_lookup *map;
4627         struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
4628         struct extent_map_tree *em_tree = &map_tree->map_tree;
4629         u64 offset;
4630         u64 stripe_offset;
4631         u64 stripe_end_offset;
4632         u64 stripe_nr;
4633         u64 stripe_nr_orig;
4634         u64 stripe_nr_end;
4635         u64 stripe_len;
4636         u64 *raid_map = NULL;
4637         int stripe_index;
4638         int i;
4639         int ret = 0;
4640         int num_stripes;
4641         int max_errors = 0;
4642         struct btrfs_bio *bbio = NULL;
4643         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4644         int dev_replace_is_ongoing = 0;
4645         int num_alloc_stripes;
4646         int patch_the_first_stripe_for_dev_replace = 0;
4647         u64 physical_to_patch_in_first_stripe = 0;
4648         u64 raid56_full_stripe_start = (u64)-1;
4649
4650         read_lock(&em_tree->lock);
4651         em = lookup_extent_mapping(em_tree, logical, *length);
4652         read_unlock(&em_tree->lock);
4653
4654         if (!em) {
4655                 btrfs_crit(fs_info, "unable to find logical %llu len %llu",
4656                         logical, *length);
4657                 return -EINVAL;
4658         }
4659
4660         if (em->start > logical || em->start + em->len < logical) {
4661                 btrfs_crit(fs_info, "found a bad mapping, wanted %Lu, "
4662                            "found %Lu-%Lu", logical, em->start,
4663                            em->start + em->len);
                     free_extent_map(em); /* don't leak the mapping on this error path */
4664                 return -EINVAL;
4665         }
4666
4667         map = (struct map_lookup *)em->bdev;
4668         offset = logical - em->start;
4669
4670         stripe_len = map->stripe_len;
4671         stripe_nr = offset;
4672         /*
4673          * stripe_nr counts the total number of stripes we have to stride
4674          * to get to this block
4675          */
4676         do_div(stripe_nr, stripe_len);
4677
4678         stripe_offset = stripe_nr * stripe_len;
4679         BUG_ON(offset < stripe_offset);
4680
4681         /* stripe_offset is the offset of this block in its stripe */
4682         stripe_offset = offset - stripe_offset;
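             /*
              * Illustrative example: with the default 64K stripe_len and
              * offset 200K into the chunk, stripe_nr ends up as 3 and
              * stripe_offset as 8K, i.e. this block starts 8K into the
              * fourth stripe.
              */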
4683
4684         /* if we're here for raid56, we need to know the stripe aligned start */
4685         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4686                 unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
4687                 raid56_full_stripe_start = offset;
4688
4689                 /* allow a write of a full stripe, but make sure we don't
4690                  * allow straddling of stripes
4691                  */
4692                 do_div(raid56_full_stripe_start, full_stripe_len);
4693                 raid56_full_stripe_start *= full_stripe_len;
4694         }
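             /*
              * Illustrative example: on a 3-disk RAID5 (two 64K data
              * stripes, so full_stripe_len == 128K), an offset of 200K
              * rounds down to raid56_full_stripe_start == 128K.
              */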
4695
4696         if (rw & REQ_DISCARD) {
4697                 /* we don't discard raid56 yet */
4698                 if (map->type &
4699                     (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
4700                         ret = -EOPNOTSUPP;
4701                         goto out;
4702                 }
4703                 *length = min_t(u64, em->len - offset, *length);
4704         } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
4705                 u64 max_len;
4706                 /* For writes to RAID[56], allow a full stripeset across all disks.
4707                    For other RAID types and for RAID[56] reads, just allow a single
4708                    stripe (on a single disk). */
4709                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
4710                     (rw & REQ_WRITE)) {
4711                         max_len = stripe_len * nr_data_stripes(map) -
4712                                 (offset - raid56_full_stripe_start);
4713                 } else {
4714                         /* we limit the length of each bio to what fits in a stripe */
4715                         max_len = stripe_len - stripe_offset;
4716                 }
4717                 *length = min_t(u64, em->len - offset, max_len);
4718         } else {
4719                 *length = em->len - offset;
4720         }
4721
4722         /* This is for when we're called from btrfs_merge_bio_hook() and all
4723            it cares about is the length */
4724         if (!bbio_ret)
4725                 goto out;
4726
4727         btrfs_dev_replace_lock(dev_replace);
4728         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
4729         if (!dev_replace_is_ongoing)
4730                 btrfs_dev_replace_unlock(dev_replace);
4731
4732         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
4733             !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
4734             dev_replace->tgtdev != NULL) {
4735                 /*
4736                  * in dev-replace case, for repair case (that's the only
4737                  * case where the mirror is selected explicitly when
4738                  * calling btrfs_map_block), blocks left of the left cursor
4739                  * can also be read from the target drive.
4740                  * For REQ_GET_READ_MIRRORS, the target drive is added as
4741                  * the last one to the array of stripes. For READ, it also
4742                  * needs to be supported using the same mirror number.
4743                  * If the requested block is not left of the left cursor,
4744                  * EIO is returned. This can happen because btrfs_num_copies()
4745                  * returns one more in the dev-replace case.
4746                  */
4747                 u64 tmp_length = *length;
4748                 struct btrfs_bio *tmp_bbio = NULL;
4749                 int tmp_num_stripes;
4750                 u64 srcdev_devid = dev_replace->srcdev->devid;
4751                 int index_srcdev = 0;
4752                 int found = 0;
4753                 u64 physical_of_found = 0;
4754
4755                 ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
4756                              logical, &tmp_length, &tmp_bbio, 0, NULL);
4757                 if (ret) {
4758                         WARN_ON(tmp_bbio != NULL);
4759                         goto out;
4760                 }
4761
4762                 tmp_num_stripes = tmp_bbio->num_stripes;
4763                 if (mirror_num > tmp_num_stripes) {
4764                         /*
4765                          * REQ_GET_READ_MIRRORS does not contain this
4766                          * mirror, that means that the requested area
4767                          * is not left of the left cursor
4768                          */
4769                         ret = -EIO;
4770                         kfree(tmp_bbio);
4771                         goto out;
4772                 }
4773
4774                 /*
4775                  * process the rest of the function using the mirror_num
4776                  * of the source drive. Therefore look it up first.
4777                  * At the end, patch the device pointer to the one of the
4778                  * target drive.
4779                  */
4780                 for (i = 0; i < tmp_num_stripes; i++) {
4781                         if (tmp_bbio->stripes[i].dev->devid == srcdev_devid) {
4782                                 /*
4783                                  * In case of DUP, in order to keep it
4784                                  * simple, only add the mirror with the
4785                                  * lowest physical address
4786                                  */
4787                                 if (found &&
4788                                     physical_of_found <=
4789                                      tmp_bbio->stripes[i].physical)
4790                                         continue;
4791                                 index_srcdev = i;
4792                                 found = 1;
4793                                 physical_of_found =
4794                                         tmp_bbio->stripes[i].physical;
4795                         }
4796                 }
4797
4798                 if (found) {
4799                         mirror_num = index_srcdev + 1;
4800                         patch_the_first_stripe_for_dev_replace = 1;
4801                         physical_to_patch_in_first_stripe = physical_of_found;
4802                 } else {
4803                         WARN_ON(1);
4804                         ret = -EIO;
4805                         kfree(tmp_bbio);
4806                         goto out;
4807                 }
4808
4809                 kfree(tmp_bbio);
4810         } else if (mirror_num > map->num_stripes) {
4811                 mirror_num = 0;
4812         }
4813
4814         num_stripes = 1;
4815         stripe_index = 0;
4816         stripe_nr_orig = stripe_nr;
4817         stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
4818         do_div(stripe_nr_end, map->stripe_len);
4819         stripe_end_offset = stripe_nr_end * map->stripe_len -
4820                             (offset + *length);
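             /*
              * Illustrative example: with a 64K stripe_len and
              * offset + *length == 200K, stripe_nr_end == 4 and
              * stripe_end_offset == 56K, the unused tail of the last
              * stripe touched by the request.
              */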
4821
4822         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
4823                 if (rw & REQ_DISCARD)
4824                         num_stripes = min_t(u64, map->num_stripes,
4825                                             stripe_nr_end - stripe_nr_orig);
4826                 stripe_index = do_div(stripe_nr, map->num_stripes);
4827         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
4828                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
4829                         num_stripes = map->num_stripes;
4830                 else if (mirror_num)
4831                         stripe_index = mirror_num - 1;
4832                 else {
4833                         stripe_index = find_live_mirror(fs_info, map, 0,
4834                                             map->num_stripes,
4835                                             current->pid % map->num_stripes,
4836                                             dev_replace_is_ongoing);
4837                         mirror_num = stripe_index + 1;
4838                 }
4839
4840         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
4841                 if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
4842                         num_stripes = map->num_stripes;
4843                 } else if (mirror_num) {
4844                         stripe_index = mirror_num - 1;
4845                 } else {
4846                         mirror_num = 1;
4847                 }
4848
4849         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
4850                 int factor = map->num_stripes / map->sub_stripes;
4851
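                     /*
                      * Illustrative example: with num_stripes == 4 and
                      * sub_stripes == 2 there are factor == 2 mirror pairs;
                      * do_div() picks the pair (stripe_index is its first
                      * device) and leaves stripe_nr as the stripe number
                      * within that pair.
                      */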
4852                 stripe_index = do_div(stripe_nr, factor);
4853                 stripe_index *= map->sub_stripes;
4854
4855                 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
4856                         num_stripes = map->sub_stripes;
4857                 else if (rw & REQ_DISCARD)
4858                         num_stripes = min_t(u64, map->sub_stripes *
4859                                             (stripe_nr_end - stripe_nr_orig),
4860                                             map->num_stripes);
4861                 else if (mirror_num)
4862                         stripe_index += mirror_num - 1;
4863                 else {
4864                         int old_stripe_index = stripe_index;
4865                         stripe_index = find_live_mirror(fs_info, map,
4866                                               stripe_index,
4867                                               map->sub_stripes, stripe_index +
4868                                               current->pid % map->sub_stripes,
4869                                               dev_replace_is_ongoing);
4870                         mirror_num = stripe_index - old_stripe_index + 1;
4871                 }
4872
4873         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
4874                                 BTRFS_BLOCK_GROUP_RAID6)) {
4875                 u64 tmp;
4876
4877                 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
4878                     && raid_map_ret) {
4879                         int i, rot;
4880
4881                         /* push stripe_nr back to the start of the full stripe */
4882                         stripe_nr = raid56_full_stripe_start;
4883                         do_div(stripe_nr, stripe_len);
4884
4885                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4886
4887                         /* RAID[56] write or recovery. Return all stripes */
4888                         num_stripes = map->num_stripes;
4889                         max_errors = nr_parity_stripes(map);
4890
4891                         raid_map = kmalloc(sizeof(u64) * num_stripes,
4892                                            GFP_NOFS);
4893                         if (!raid_map) {
4894                                 ret = -ENOMEM;
4895                                 goto out;
4896                         }
4897
4898                         /* Work out the disk rotation on this stripe-set */
4899                         tmp = stripe_nr;
4900                         rot = do_div(tmp, num_stripes);
4901
4902                         /* Fill in the logical address of each stripe */
4903                         tmp = stripe_nr * nr_data_stripes(map);
4904                         for (i = 0; i < nr_data_stripes(map); i++)
4905                                 raid_map[(i+rot) % num_stripes] =
4906                                         em->start + (tmp + i) * map->stripe_len;
4907
4908                         raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
4909                         if (map->type & BTRFS_BLOCK_GROUP_RAID6)
4910                                 raid_map[(i+rot+1) % num_stripes] =
4911                                         RAID6_Q_STRIPE;
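                             /*
                              * Illustrative example: on a 3-disk RAID5
                              * (nr_data_stripes == 2), full stripe N has
                              * rot == N % 3, so the parity slot moves to a
                              * different disk on each successive full
                              * stripe while the data keeps logical order.
                              */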
4912
4913                         *length = map->stripe_len;
4914                         stripe_index = 0;
4915                         stripe_offset = 0;
4916                 } else {
4917                         /*
4918                          * Mirror #0 or #1 means the original data block.
4919                          * Mirror #2 is RAID5 parity block.
4920                          * Mirror #3 is RAID6 Q block.
4921                          */
4922                         stripe_index = do_div(stripe_nr, nr_data_stripes(map));
4923                         if (mirror_num > 1)
4924                                 stripe_index = nr_data_stripes(map) +
4925                                                 mirror_num - 2;
4926
4927                         /* We distribute the parity blocks across stripes */
4928                         tmp = stripe_nr + stripe_index;
4929                         stripe_index = do_div(tmp, map->num_stripes);
4930                 }
4931         } else {
4932                 /*
4933                  * after this do_div call, stripe_nr is the number of stripes
4934                  * on this device we have to walk to find the data, and
4935                  * stripe_index is the number of our device in the stripe array
4936                  */
4937                 stripe_index = do_div(stripe_nr, map->num_stripes);
4938                 mirror_num = stripe_index + 1;
4939         }
4940         BUG_ON(stripe_index >= map->num_stripes);
4941
4942         num_alloc_stripes = num_stripes;
4943         if (dev_replace_is_ongoing) {
4944                 if (rw & (REQ_WRITE | REQ_DISCARD))
4945                         num_alloc_stripes <<= 1;
4946                 if (rw & REQ_GET_READ_MIRRORS)
4947                         num_alloc_stripes++;
4948         }
4949         bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS);
4950         if (!bbio) {
4951                 kfree(raid_map);
4952                 ret = -ENOMEM;
4953                 goto out;
4954         }
4955         atomic_set(&bbio->error, 0);
4956
4957         if (rw & REQ_DISCARD) {
4958                 int factor = 0;
4959                 int sub_stripes = 0;
4960                 u64 stripes_per_dev = 0;
4961                 u32 remaining_stripes = 0;
4962                 u32 last_stripe = 0;
4963
4964                 if (map->type &
4965                     (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
4966                         if (map->type & BTRFS_BLOCK_GROUP_RAID0)
4967                                 sub_stripes = 1;
4968                         else
4969                                 sub_stripes = map->sub_stripes;
4970
4971                         factor = map->num_stripes / sub_stripes;
4972                         stripes_per_dev = div_u64_rem(stripe_nr_end -
4973                                                       stripe_nr_orig,
4974                                                       factor,
4975                                                       &remaining_stripes);
4976                         div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
4977                         last_stripe *= sub_stripes;
4978                 }
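                     /*
                      * Illustrative example: a discard spanning 7 stripes
                      * (stripe_nr_end - stripe_nr_orig == 7) on 4-disk
                      * RAID0 gives stripes_per_dev == 1 and
                      * remaining_stripes == 3, so the loop below adds one
                      * extra stripe_len to the first three bbio entries.
                      */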
4979
4980                 for (i = 0; i < num_stripes; i++) {
4981                         bbio->stripes[i].physical =
4982                                 map->stripes[stripe_index].physical +
4983                                 stripe_offset + stripe_nr * map->stripe_len;
4984                         bbio->stripes[i].dev = map->stripes[stripe_index].dev;
4985
4986                         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
4987                                          BTRFS_BLOCK_GROUP_RAID10)) {
4988                                 bbio->stripes[i].length = stripes_per_dev *
4989                                                           map->stripe_len;
4990
4991                                 if (i / sub_stripes < remaining_stripes)
4992                                         bbio->stripes[i].length +=
4993                                                 map->stripe_len;
4994
4995                                 /*
4996                                  * Special for the first stripe and
4997                                  * the last stripe:
4998                                  *
4999                                  * |-------|...|-------|
5000                                  *     |----------|
5001                                  *    off     end_off
5002                                  */
5003                                 if (i < sub_stripes)
5004                                         bbio->stripes[i].length -=
5005                                                 stripe_offset;
5006
5007                                 if (stripe_index >= last_stripe &&
5008                                     stripe_index <= (last_stripe +
5009                                                      sub_stripes - 1))
5010                                         bbio->stripes[i].length -=
5011                                                 stripe_end_offset;
5012
5013                                 if (i == sub_stripes - 1)
5014                                         stripe_offset = 0;
5015                         } else
5016                                 bbio->stripes[i].length = *length;
5017
5018                         stripe_index++;
5019                         if (stripe_index == map->num_stripes) {
5020                                 /* This could only happen for RAID0/10 */
5021                                 stripe_index = 0;
5022                                 stripe_nr++;
5023                         }
5024                 }
5025         } else {
5026                 for (i = 0; i < num_stripes; i++) {
5027                         bbio->stripes[i].physical =
5028                                 map->stripes[stripe_index].physical +
5029                                 stripe_offset +
5030                                 stripe_nr * map->stripe_len;
5031                         bbio->stripes[i].dev =
5032                                 map->stripes[stripe_index].dev;
5033                         stripe_index++;
5034                 }
5035         }
5036
5037         if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
5038                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
5039                                  BTRFS_BLOCK_GROUP_RAID10 |
5040                                  BTRFS_BLOCK_GROUP_RAID5 |
5041                                  BTRFS_BLOCK_GROUP_DUP)) {
5042                         max_errors = 1;
5043                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
5044                         max_errors = 2;
5045                 }
5046         }
5047
5048         if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5049             dev_replace->tgtdev != NULL) {
5050                 int index_where_to_add;
5051                 u64 srcdev_devid = dev_replace->srcdev->devid;
5052
5053                 /*
5054                  * duplicate the write operations while the dev replace
5055                  * procedure is running. Since the copying of the old disk
5056                  * to the new disk takes place at run time while the
5057                  * filesystem is mounted writable, the regular write
5058                  * operations to the old disk have to be duplicated to go
5059                  * to the new disk as well.
5060                  * Note that device->missing is handled by the caller, and
5061                  * that the write to the old disk is already set up in the
5062                  * stripes array.
5063                  */
5064                 index_where_to_add = num_stripes;
5065                 for (i = 0; i < num_stripes; i++) {
5066                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5067                                 /* write to new disk, too */
5068                                 struct btrfs_bio_stripe *new =
5069                                         bbio->stripes + index_where_to_add;
5070                                 struct btrfs_bio_stripe *old =
5071                                         bbio->stripes + i;
5072
5073                                 new->physical = old->physical;
5074                                 new->length = old->length;
5075                                 new->dev = dev_replace->tgtdev;
5076                                 index_where_to_add++;
5077                                 max_errors++;
5078                         }
5079                 }
5080                 num_stripes = index_where_to_add;
5081         } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
5082                    dev_replace->tgtdev != NULL) {
5083                 u64 srcdev_devid = dev_replace->srcdev->devid;
5084                 int index_srcdev = 0;
5085                 int found = 0;
5086                 u64 physical_of_found = 0;
5087
5088                 /*
5089                  * During the dev-replace procedure, the target drive can
5090                  * also be used to read data in case it is needed to repair
5091                  * a corrupt block elsewhere. This is possible if the
5092                  * requested area is left of the left cursor. In this area,
5093                  * the target drive is a full copy of the source drive.
5094                  */
5095                 for (i = 0; i < num_stripes; i++) {
5096                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5097                                 /*
5098                                  * In case of DUP, in order to keep it
5099                                  * simple, only add the mirror with the
5100                                  * lowest physical address
5101                                  */
5102                                 if (found &&
5103                                     physical_of_found <=
5104                                      bbio->stripes[i].physical)
5105                                         continue;
5106                                 index_srcdev = i;
5107                                 found = 1;
5108                                 physical_of_found = bbio->stripes[i].physical;
5109                         }
5110                 }
5111                 if (found) {
5112                         u64 length = map->stripe_len;
5113
5114                         if (physical_of_found + length <=
5115                             dev_replace->cursor_left) {
5116                                 struct btrfs_bio_stripe *tgtdev_stripe =
5117                                         bbio->stripes + num_stripes;
5118
5119                                 tgtdev_stripe->physical = physical_of_found;
5120                                 tgtdev_stripe->length =
5121                                         bbio->stripes[index_srcdev].length;
5122                                 tgtdev_stripe->dev = dev_replace->tgtdev;
5123
5124                                 num_stripes++;
5125                         }
5126                 }
5127         }
5128
5129         *bbio_ret = bbio;
5130         bbio->num_stripes = num_stripes;
5131         bbio->max_errors = max_errors;
5132         bbio->mirror_num = mirror_num;
5133
5134         /*
5135          * this is the case that REQ_READ && dev_replace_is_ongoing &&
5136          * mirror_num == num_stripes + 1 && dev_replace target drive is
5137          * available as a mirror
5138          */
5139         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
5140                 WARN_ON(num_stripes > 1);
5141                 bbio->stripes[0].dev = dev_replace->tgtdev;
5142                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
5143                 bbio->mirror_num = map->num_stripes + 1;
5144         }
5145         if (raid_map) {
5146                 sort_parity_stripes(bbio, raid_map);
5147                 *raid_map_ret = raid_map;
5148         }
5149 out:
5150         if (dev_replace_is_ongoing)
5151                 btrfs_dev_replace_unlock(dev_replace);
5152         free_extent_map(em);
5153         return ret;
5154 }
5155
5156 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5157                       u64 logical, u64 *length,
5158                       struct btrfs_bio **bbio_ret, int mirror_num)
5159 {
5160         return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5161                                  mirror_num, NULL);
5162 }
5163
5164 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5165                      u64 chunk_start, u64 physical, u64 devid,
5166                      u64 **logical, int *naddrs, int *stripe_len)
5167 {
5168         struct extent_map_tree *em_tree = &map_tree->map_tree;
5169         struct extent_map *em;
5170         struct map_lookup *map;
5171         u64 *buf;
5172         u64 bytenr;
5173         u64 length;
5174         u64 stripe_nr;
5175         u64 rmap_len;
5176         int i, j, nr = 0;
5177
5178         read_lock(&em_tree->lock);
5179         em = lookup_extent_mapping(em_tree, chunk_start, 1);
5180         read_unlock(&em_tree->lock);
5181
5182         if (!em) {
5183                 printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
5184                        chunk_start);
5185                 return -EIO;
5186         }
5187
5188         if (em->start != chunk_start) {
5189                 printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
5190                        em->start, chunk_start);
5191                 free_extent_map(em);
5192                 return -EIO;
5193         }
5194         map = (struct map_lookup *)em->bdev;
5195
5196         length = em->len;
5197         rmap_len = map->stripe_len;
5198
5199         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5200                 do_div(length, map->num_stripes / map->sub_stripes);
5201         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5202                 do_div(length, map->num_stripes);
5203         else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
5204                               BTRFS_BLOCK_GROUP_RAID6)) {
5205                 do_div(length, nr_data_stripes(map));
5206                 rmap_len = map->stripe_len * nr_data_stripes(map);
5207         }
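             /*
              * Illustrative example: a 1G RAID10 chunk with num_stripes == 4
              * and sub_stripes == 2 occupies 512M on each device, so the
              * per-device window searched below is length == 512M.
              */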
5208
5209         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
5210         BUG_ON(!buf); /* -ENOMEM */
5211
5212         for (i = 0; i < map->num_stripes; i++) {
5213                 if (devid && map->stripes[i].dev->devid != devid)
5214                         continue;
5215                 if (map->stripes[i].physical > physical ||
5216                     map->stripes[i].physical + length <= physical)
5217                         continue;
5218
5219                 stripe_nr = physical - map->stripes[i].physical;
5220                 do_div(stripe_nr, map->stripe_len);
5221
5222                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
5223                         stripe_nr = stripe_nr * map->num_stripes + i;
5224                         do_div(stripe_nr, map->sub_stripes);
5225                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
5226                         stripe_nr = stripe_nr * map->num_stripes + i;
5227                 } /* else if RAID[56], multiply by nr_data_stripes().
5228                    * Alternatively, just use rmap_len below instead of
5229                    * map->stripe_len */
5230
5231                 bytenr = chunk_start + stripe_nr * rmap_len;
5232                 WARN_ON(nr >= map->num_stripes);
5233                 for (j = 0; j < nr; j++) {
5234                         if (buf[j] == bytenr)
5235                                 break;
5236                 }
5237                 if (j == nr) {
5238                         WARN_ON(nr >= map->num_stripes);
5239                         buf[nr++] = bytenr;
5240                 }
5241         }
5242
5243         *logical = buf;
5244         *naddrs = nr;
5245         *stripe_len = rmap_len;
5246
5247         free_extent_map(em);
5248         return 0;
5249 }
5250
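     /*
      * Completion handler for one stripe of a btrfs_bio: bump the
      * per-device error statistics on failure and, once the last stripe
      * finishes, fail the original bio only if more stripes errored than
      * the bbio can tolerate.
      */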
5251 static void btrfs_end_bio(struct bio *bio, int err)
5252 {
5253         struct btrfs_bio *bbio = bio->bi_private;
5254         int is_orig_bio = 0;
5255
5256         if (err) {
5257                 atomic_inc(&bbio->error);
5258                 if (err == -EIO || err == -EREMOTEIO) {
5259                         unsigned int stripe_index =
5260                                 btrfs_io_bio(bio)->stripe_index;
5261                         struct btrfs_device *dev;
5262
5263                         BUG_ON(stripe_index >= bbio->num_stripes);
5264                         dev = bbio->stripes[stripe_index].dev;
5265                         if (dev->bdev) {
5266                                 if (bio->bi_rw & WRITE)
5267                                         btrfs_dev_stat_inc(dev,
5268                                                 BTRFS_DEV_STAT_WRITE_ERRS);
5269                                 else
5270                                         btrfs_dev_stat_inc(dev,
5271                                                 BTRFS_DEV_STAT_READ_ERRS);
5272                                 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
5273                                         btrfs_dev_stat_inc(dev,
5274                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
5275                                 btrfs_dev_stat_print_on_error(dev);
5276                         }
5277                 }
5278         }
5279
5280         if (bio == bbio->orig_bio)
5281                 is_orig_bio = 1;
5282
5283         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5284                 if (!is_orig_bio) {
5285                         bio_put(bio);
5286                         bio = bbio->orig_bio;
5287                 }
5288                 bio->bi_private = bbio->private;
5289                 bio->bi_end_io = bbio->end_io;
5290                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5291                 /* only send an error to the higher layers if it is
5292                  * beyond the tolerance of the btrfs bio
5293                  */
5294                 if (atomic_read(&bbio->error) > bbio->max_errors) {
5295                         err = -EIO;
5296                 } else {
5297                         /*
5298                          * this bio is actually up to date, we didn't
5299                          * go over the max number of errors
5300                          */
5301                         set_bit(BIO_UPTODATE, &bio->bi_flags);
5302                         err = 0;
5303                 }
5304                 kfree(bbio);
5305
5306                 bio_endio(bio, err);
5307         } else if (!is_orig_bio) {
5308                 bio_put(bio);
5309         }
5310 }
5311
5312 struct async_sched {
5313         struct bio *bio;
5314         int rw;
5315         struct btrfs_fs_info *info;
5316         struct btrfs_work work;
5317 };
5318
5319 /*
5320  * see run_scheduled_bios for a description of why bios are collected for
5321  * async submit.
5322  *
5323  * This will add one bio to the pending list for a device and make sure
5324  * the work struct is scheduled.
5325  */
5326 static noinline void btrfs_schedule_bio(struct btrfs_root *root,
5327                                         struct btrfs_device *device,
5328                                         int rw, struct bio *bio)
5329 {
5330         int should_queue = 1;
5331         struct btrfs_pending_bios *pending_bios;
5332
5333         if (device->missing || !device->bdev) {
5334                 bio_endio(bio, -EIO);
5335                 return;
5336         }
5337
5338         /* don't bother with additional async steps for reads, right now */
5339         if (!(rw & REQ_WRITE)) {
5340                 bio_get(bio);
5341                 btrfsic_submit_bio(rw, bio);
5342                 bio_put(bio);
5343                 return;
5344         }
5345
5346         /*
5347          * nr_async_bios allows us to reliably return congestion to the
5348          * higher layers.  Otherwise, the async bio makes it appear we have
5349          * made progress against dirty pages when we've really just put it
5350          * on a queue for later
5351          */
5352         atomic_inc(&root->fs_info->nr_async_bios);
5353         WARN_ON(bio->bi_next);
5354         bio->bi_next = NULL;
5355         bio->bi_rw |= rw;
5356
5357         spin_lock(&device->io_lock);
5358         if (bio->bi_rw & REQ_SYNC)
5359                 pending_bios = &device->pending_sync_bios;
5360         else
5361                 pending_bios = &device->pending_bios;
5362
5363         if (pending_bios->tail)
5364                 pending_bios->tail->bi_next = bio;
5365
5366         pending_bios->tail = bio;
5367         if (!pending_bios->head)
5368                 pending_bios->head = bio;
5369         if (device->running_pending)
5370                 should_queue = 0;
5371
5372         spin_unlock(&device->io_lock);
5373
5374         if (should_queue)
5375                 btrfs_queue_worker(&root->fs_info->submit_workers,
5376                                    &device->work);
5377 }
5378
5379 static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5380                        sector_t sector)
5381 {
5382         struct bio_vec *prev;
5383         struct request_queue *q = bdev_get_queue(bdev);
5384         unsigned short max_sectors = queue_max_sectors(q);
5385         struct bvec_merge_data bvm = {
5386                 .bi_bdev = bdev,
5387                 .bi_sector = sector,
5388                 .bi_rw = bio->bi_rw,
5389         };
5390
5391         if (bio->bi_vcnt == 0) {
5392                 WARN_ON(1);
5393                 return 1;
5394         }
5395
5396         prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5397         if (bio_sectors(bio) > max_sectors)
5398                 return 0;
5399
5400         if (!q->merge_bvec_fn)
5401                 return 1;
5402
5403         bvm.bi_size = bio->bi_size - prev->bv_len;
5404         if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5405                 return 0;
5406         return 1;
5407 }
5408
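     /*
      * Point the bio at one stripe of the btrfs_bio (completion runs
      * through btrfs_end_bio) and submit it, either directly or through
      * the per-device async queue.
      */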
5409 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5410                               struct bio *bio, u64 physical, int dev_nr,
5411                               int rw, int async)
5412 {
5413         struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5414
5415         bio->bi_private = bbio;
5416         btrfs_io_bio(bio)->stripe_index = dev_nr;
5417         bio->bi_end_io = btrfs_end_bio;
5418         bio->bi_sector = physical >> 9;
5419 #ifdef DEBUG
5420         {
5421                 struct rcu_string *name;
5422
5423                 rcu_read_lock();
5424                 name = rcu_dereference(dev->name);
5425                 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
5426                          "(%s id %llu), size=%u\n", rw,
5427                          (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
5428                          name->str, dev->devid, bio->bi_size);
5429                 rcu_read_unlock();
5430         }
5431 #endif
5432         bio->bi_bdev = dev->bdev;
5433         if (async)
5434                 btrfs_schedule_bio(root, dev, rw, bio);
5435         else
5436                 btrfsic_submit_bio(rw, bio);
5437 }
5438
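     /*
      * The device cannot take first_bio in one piece, so re-add its pages
      * to smaller bios, submitting each partial bio as soon as the next
      * page no longer fits.
      */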
5439 static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5440                               struct bio *first_bio, struct btrfs_device *dev,
5441                               int dev_nr, int rw, int async)
5442 {
5443         struct bio_vec *bvec = first_bio->bi_io_vec;
5444         struct bio *bio;
5445         int nr_vecs = bio_get_nr_vecs(dev->bdev);
5446         u64 physical = bbio->stripes[dev_nr].physical;
5447
5448 again:
5449         bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
5450         if (!bio)
5451                 return -ENOMEM;
5452
5453         while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5454                 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5455                                  bvec->bv_offset) < bvec->bv_len) {
5456                         u64 len = bio->bi_size;
5457
5458                         atomic_inc(&bbio->stripes_pending);
5459                         submit_stripe_bio(root, bbio, bio, physical, dev_nr,
5460                                           rw, async);
5461                         physical += len;
5462                         goto again;
5463                 }
5464                 bvec++;
5465         }
5466
5467         submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
5468         return 0;
5469 }
5470
5471 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5472 {
5473         atomic_inc(&bbio->error);
5474         if (atomic_dec_and_test(&bbio->stripes_pending)) {
5475                 bio->bi_private = bbio->private;
5476                 bio->bi_end_io = bbio->end_io;
5477                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5478                 bio->bi_sector = logical >> 9;
5479                 kfree(bbio);
5480                 bio_endio(bio, -EIO);
5481         }
5482 }
5483
5484 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5485                   int mirror_num, int async_submit)
5486 {
5487         struct btrfs_device *dev;
5488         struct bio *first_bio = bio;
5489         u64 logical = (u64)bio->bi_sector << 9;
5490         u64 length = 0;
5491         u64 map_length;
5492         u64 *raid_map = NULL;
5493         int ret;
5494         int dev_nr = 0;
5495         int total_devs = 1;
5496         struct btrfs_bio *bbio = NULL;
5497
5498         length = bio->bi_size;
5499         map_length = length;
5500
5501         ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
5502                               mirror_num, &raid_map);
5503         if (ret) /* -ENOMEM */
5504                 return ret;
5505
5506         total_devs = bbio->num_stripes;
5507         bbio->orig_bio = first_bio;
5508         bbio->private = first_bio->bi_private;
5509         bbio->end_io = first_bio->bi_end_io;
5510         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
5511
5512         if (raid_map) {
5513                 /* In this case, map_length has been set to the length of
5514                    a single stripe, not the whole write */
5515                 if (rw & WRITE) {
5516                         return raid56_parity_write(root, bio, bbio,
5517                                                    raid_map, map_length);
5518                 } else {
5519                         return raid56_parity_recover(root, bio, bbio,
5520                                                      raid_map, map_length,
5521                                                      mirror_num);
5522                 }
5523         }
5524
5525         if (map_length < length) {
5526                 btrfs_crit(root->fs_info, "mapping failed logical %llu bio len %llu len %llu",
5527                         logical, length, map_length);
5528                 BUG();
5529         }
5530
5531         while (dev_nr < total_devs) {
5532                 dev = bbio->stripes[dev_nr].dev;
5533                 if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
5534                         bbio_error(bbio, first_bio, logical);
5535                         dev_nr++;
5536                         continue;
5537                 }
5538
5539                 /*
5540                  * Check and see if we're ok with this bio based on its size
5541                  * and offset with the given device.
5542                  */
5543                 if (!bio_size_ok(dev->bdev, first_bio,
5544                                  bbio->stripes[dev_nr].physical >> 9)) {
5545                         ret = breakup_stripe_bio(root, bbio, first_bio, dev,
5546                                                  dev_nr, rw, async_submit);
5547                         BUG_ON(ret);
5548                         dev_nr++;
5549                         continue;
5550                 }
5551
5552                 if (dev_nr < total_devs - 1) {
5553                         bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5554                         BUG_ON(!bio); /* -ENOMEM */
5555                 } else {
5556                         bio = first_bio;
5557                 }
5558
5559                 submit_stripe_bio(root, bbio, bio,
5560                                   bbio->stripes[dev_nr].physical, dev_nr, rw,
5561                                   async_submit);
5562                 dev_nr++;
5563         }
5564         return 0;
5565 }
5566
5567 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
5568                                        u8 *uuid, u8 *fsid)
5569 {
5570         struct btrfs_device *device;
5571         struct btrfs_fs_devices *cur_devices;
5572
5573         cur_devices = fs_info->fs_devices;
5574         while (cur_devices) {
5575                 if (!fsid ||
5576                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
5577                         device = __find_device(&cur_devices->devices,
5578                                                devid, uuid);
5579                         if (device)
5580                                 return device;
5581                 }
5582                 cur_devices = cur_devices->seed;
5583         }
5584         return NULL;
5585 }
5586
5587 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
5588                                             u64 devid, u8 *dev_uuid)
5589 {
5590         struct btrfs_device *device;
5591         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
5592
5593         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
5594         if (IS_ERR(device))
5595                 return NULL;
5596
5597         list_add(&device->dev_list, &fs_devices->devices);
5598         device->fs_devices = fs_devices;
5599         fs_devices->num_devices++;
5600
5601         device->missing = 1;
5602         fs_devices->missing_devices++;
5603
5604         return device;
5605 }
5606
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:    used only for generating a new devid, can be NULL if
 *              devid is provided (i.e. @devid != NULL).
 * @devid:      a pointer to devid for this device.  If NULL a new devid
 *              is generated.
 * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
 *              is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and can be
 * destroyed with kfree() right away.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
                                        const u64 *devid,
                                        const u8 *uuid)
{
        struct btrfs_device *dev;
        u64 tmp;

        if (!devid && !fs_info) {
                WARN_ON(1);
                return ERR_PTR(-EINVAL);
        }

        dev = __alloc_device();
        if (IS_ERR(dev))
                return dev;

        if (devid) {
                tmp = *devid;
        } else {
                int ret;

                ret = find_next_devid(fs_info, &tmp);
                if (ret) {
                        kfree(dev);
                        return ERR_PTR(ret);
                }
        }
        dev->devid = tmp;

        if (uuid)
                memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
        else
                generate_random_uuid(dev->uuid);

        dev->work.func = pending_bios_fn;

        return dev;
}

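/*
 * Illustrative usage only (not part of the original file): a caller
 * that wants both a fresh devid and a fresh UUID passes NULL for both,
 * and may simply kfree() the result because, per the kernel-doc above,
 * the returned device is not linked onto any list yet.
 *
 *      struct btrfs_device *dev;
 *
 *      dev = btrfs_alloc_device(fs_info, NULL, NULL);
 *      if (IS_ERR(dev))
 *              return PTR_ERR(dev);
 *      ...
 *      kfree(dev);
 */
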
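/*
 * Read one chunk item and insert the corresponding logical->physical
 * mapping into the mapping tree.  Stripes that reference devices we do
 * not have are an error, unless the filesystem is mounted -o degraded,
 * in which case "missing" placeholder devices are created.
 */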
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
{
        struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
        struct map_lookup *map;
        struct extent_map *em;
        u64 logical;
        u64 length;
        u64 devid;
        u8 uuid[BTRFS_UUID_SIZE];
        int num_stripes;
        int ret;
        int i;

        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);

        read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
        read_unlock(&map_tree->map_tree.lock);

        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
                free_extent_map(em);
                return 0;
        } else if (em) {
                free_extent_map(em);
        }

        em = alloc_extent_map();
        if (!em)
                return -ENOMEM;
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                free_extent_map(em);
                return -ENOMEM;
        }

        em->bdev = (struct block_device *)map;
        em->start = logical;
        em->len = length;
        em->orig_start = 0;
        em->block_start = 0;
        em->block_len = em->len;

        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
        map->io_align = btrfs_chunk_io_align(leaf, chunk);
        map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
        map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        map->type = btrfs_chunk_type(leaf, chunk);
        map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
        for (i = 0; i < num_stripes; i++) {
                map->stripes[i].physical =
                        btrfs_stripe_offset_nr(leaf, chunk, i);
                devid = btrfs_stripe_devid_nr(leaf, chunk, i);
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
                map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
                                                        uuid, NULL);
                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
                        kfree(map);
                        free_extent_map(em);
                        return -EIO;
                }
                if (!map->stripes[i].dev) {
                        map->stripes[i].dev =
                                add_missing_dev(root, devid, uuid);
                        if (!map->stripes[i].dev) {
                                kfree(map);
                                free_extent_map(em);
                                return -EIO;
                        }
                }
                map->stripes[i].dev->in_fs_metadata = 1;
        }

        write_lock(&map_tree->map_tree.lock);
        ret = add_extent_mapping(&map_tree->map_tree, em, 0);
        write_unlock(&map_tree->map_tree.lock);
        BUG_ON(ret); /* Tree corruption */
        free_extent_map(em);

        return 0;
}

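/*
 * Copy the persistent fields of the on-disk dev item @dev_item into the
 * in-memory @device.
 */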
static void fill_device_from_item(struct extent_buffer *leaf,
                                  struct btrfs_dev_item *dev_item,
                                  struct btrfs_device *device)
{
        unsigned long ptr;

        device->devid = btrfs_device_id(leaf, dev_item);
        device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
        device->total_bytes = device->disk_total_bytes;
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
        device->io_width = btrfs_device_io_width(leaf, dev_item);
        device->sector_size = btrfs_device_sector_size(leaf, dev_item);
        WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
        device->is_tgtdev_for_dev_replace = 0;

        ptr = btrfs_device_uuid(dev_item);
        read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}

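/*
 * Make sure the seed filesystem with @fsid is open: if it is not
 * already on our seed list, look it up by fsid, clone its device list,
 * open the devices read-only and chain the clone onto
 * root->fs_info->fs_devices->seed.
 */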
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
        struct btrfs_fs_devices *fs_devices;
        int ret;

        BUG_ON(!mutex_is_locked(&uuid_mutex));

        fs_devices = root->fs_info->fs_devices->seed;
        while (fs_devices) {
                if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        goto out;
                }
                fs_devices = fs_devices->seed;
        }

        fs_devices = find_fsid(fsid);
        if (!fs_devices) {
                ret = -ENOENT;
                goto out;
        }

        fs_devices = clone_fs_devices(fs_devices);
        if (IS_ERR(fs_devices)) {
                ret = PTR_ERR(fs_devices);
                goto out;
        }

        ret = __btrfs_open_devices(fs_devices, FMODE_READ,
                                   root->fs_info->bdev_holder);
        if (ret) {
                free_fs_devices(fs_devices);
                goto out;
        }

        if (!fs_devices->seeding) {
                __btrfs_close_devices(fs_devices);
                free_fs_devices(fs_devices);
                ret = -EINVAL;
                goto out;
        }

        fs_devices->seed = root->fs_info->fs_devices->seed;
        root->fs_info->fs_devices->seed = fs_devices;
out:
        return ret;
}

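/*
 * Read one dev item and match it to an in-memory device, opening the
 * owning seed filesystem first when the item belongs to one.  A device
 * that cannot be found is an error unless the filesystem is mounted
 * -o degraded, in which case a missing placeholder is created.
 */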
static int read_one_dev(struct btrfs_root *root,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
{
        struct btrfs_device *device;
        u64 devid;
        int ret;
        u8 fs_uuid[BTRFS_FSID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];

        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
        read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
                           BTRFS_FSID_SIZE);

        if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_FSID_SIZE)) {
                ret = open_seed_devices(root, fs_uuid);
                if (ret && !btrfs_test_opt(root, DEGRADED))
                        return ret;
        }

        device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
        if (!device || !device->bdev) {
                if (!btrfs_test_opt(root, DEGRADED))
                        return -EIO;

                if (!device) {
                        btrfs_warn(root->fs_info, "devid %llu missing", devid);
                        device = add_missing_dev(root, devid, dev_uuid);
                        if (!device)
                                return -ENOMEM;
                } else if (!device->missing) {
                        /*
                         * This happens when a device that was properly
                         * set up in the device info lists suddenly goes
                         * bad: device->bdev is NULL, so we have to set
                         * device->missing to one here.
                         */
                        root->fs_info->fs_devices->missing_devices++;
                        device->missing = 1;
                }
        }

        if (device->fs_devices != root->fs_info->fs_devices) {
                BUG_ON(device->writeable);
                if (device->generation !=
                    btrfs_device_generation(leaf, dev_item))
                        return -EINVAL;
        }

        fill_device_from_item(leaf, dev_item, device);
        device->in_fs_metadata = 1;
        if (device->writeable && !device->is_tgtdev_for_dev_replace) {
                device->fs_devices->total_rw_bytes += device->total_bytes;
                spin_lock(&root->fs_info->free_chunk_lock);
                root->fs_info->free_chunk_space += device->total_bytes -
                        device->bytes_used;
                spin_unlock(&root->fs_info->free_chunk_lock);
        }
        return 0;
}

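/*
 * Decode the chunk array embedded in the super block.  It holds the
 * bootstrap (system) chunks that are needed before the chunk tree
 * itself can be read.
 */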
int btrfs_read_sys_array(struct btrfs_root *root)
{
        struct btrfs_super_block *super_copy = root->fs_info->super_copy;
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        unsigned long sb_ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
                                          BTRFS_SUPER_INFO_SIZE);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
        /*
         * The sb extent buffer is artificial and just used to read the
         * system array.  btrfs_set_buffer_uptodate() does not properly
         * mark all of its pages up-to-date when the page is larger: the
         * extent does not cover the whole page and consequently
         * check_page_uptodate does not find all the page's extents
         * up-to-date (the hole beyond sb), and write_extent_buffer then
         * triggers a WARN_ON.
         *
         * Regular short extents go through the
         * mark_extent_buffer_dirty/writeback cycle, but sb spans only
         * this function.  Add an explicit SetPageUptodate call to
         * silence the warning, e.g. on PowerPC 64.
         */
        if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
                SetPageUptodate(sb->pages[0]);

        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);
                ptr += len;
                sb_ptr += len;
                cur += len;

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)sb_ptr;
                        ret = read_one_chunk(root, &key, sb, chunk);
                        if (ret)
                                break;
                        num_stripes = btrfs_chunk_num_stripes(sb, chunk);
                        len = btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                ptr += len;
                sb_ptr += len;
                cur += len;
        }
        free_extent_buffer(sb);
        return ret;
}

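/*
 * Read all device items and chunk items from the chunk tree and build
 * the in-memory device list and chunk mappings.
 */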
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        int ret;
        int slot;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        mutex_lock(&uuid_mutex);
        lock_chunks(root);

        /*
         * Read all device items, and then all the chunk items. All
         * device items are found before any chunk item (their object id
         * is smaller than the lowest possible object id for a chunk
         * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
         */
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.offset = 0;
        key.type = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                if (found_key.type == BTRFS_DEV_ITEM_KEY) {
                        struct btrfs_dev_item *dev_item;
                        dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                        ret = read_one_dev(root, leaf, dev_item);
                        if (ret)
                                goto error;
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(root, &found_key, leaf, chunk);
                        if (ret)
                                goto error;
                }
                path->slots[0]++;
        }
        ret = 0;
error:
        unlock_chunks(root);
        mutex_unlock(&uuid_mutex);

        btrfs_free_path(path);
        return ret;
}

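/* Point every known device at the dev tree root once it is available. */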
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list)
                device->dev_root = fs_info->dev_root;
        mutex_unlock(&fs_devices->device_list_mutex);
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
        int i;

        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                btrfs_dev_stat_reset(dev, i);
}

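/*
 * Load persistent device statistics from the dev tree.  A device
 * without a stats item starts with all counters reset; an item that is
 * shorter than expected only provides the leading counters.
 */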
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_root *dev_root = fs_info->dev_root;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct extent_buffer *eb;
        int slot;
        int ret = 0;
        struct btrfs_device *device;
        struct btrfs_path *path = NULL;
        int i;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                int item_size;
                struct btrfs_dev_stats_item *ptr;

                key.objectid = 0;
                key.type = BTRFS_DEV_STATS_KEY;
                key.offset = device->devid;
                ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
                if (ret) {
                        __btrfs_reset_dev_stats(device);
                        device->dev_stats_valid = 1;
                        btrfs_release_path(path);
                        continue;
                }
                slot = path->slots[0];
                eb = path->nodes[0];
                btrfs_item_key_to_cpu(eb, &found_key, slot);
                item_size = btrfs_item_size_nr(eb, slot);

                ptr = btrfs_item_ptr(eb, slot,
                                     struct btrfs_dev_stats_item);

                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
                        if (item_size >= (1 + i) * sizeof(__le64))
                                btrfs_dev_stat_set(device, i,
                                        btrfs_dev_stats_value(eb, ptr, i));
                        else
                                btrfs_dev_stat_reset(device, i);
                }

                device->dev_stats_valid = 1;
                btrfs_dev_stat_print_on_load(device);
                btrfs_release_path(path);
        }
        mutex_unlock(&fs_devices->device_list_mutex);

out:
        btrfs_free_path(path);
        return ret < 0 ? ret : 0;
}

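/*
 * Write the in-memory counters of @device into its dev_stats item in
 * the dev tree, deleting and re-creating the item if the existing one
 * is too small, and creating it if none exists yet.
 */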
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
                                struct btrfs_root *dev_root,
                                struct btrfs_device *device)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *eb;
        struct btrfs_dev_stats_item *ptr;
        int ret;
        int i;

        key.objectid = 0;
        key.type = BTRFS_DEV_STATS_KEY;
        key.offset = device->devid;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
        if (ret < 0) {
                printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
                              ret, rcu_str_deref(device->name));
                goto out;
        }

        if (ret == 0 &&
            btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
                /* need to delete old one and insert a new one */
                ret = btrfs_del_item(trans, dev_root, path);
                if (ret != 0) {
                        printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
                                      rcu_str_deref(device->name), ret);
                        goto out;
                }
                ret = 1;
        }

        if (ret == 1) {
                /* need to insert a new item */
                btrfs_release_path(path);
                ret = btrfs_insert_empty_item(trans, dev_root, path,
                                              &key, sizeof(*ptr));
                if (ret < 0) {
                        printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
                                      rcu_str_deref(device->name), ret);
                        goto out;
                }
        }

        eb = path->nodes[0];
        ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                btrfs_set_dev_stats_value(eb, ptr, i,
                                          btrfs_dev_stat_read(device, i));
        btrfs_mark_buffer_dirty(eb);

out:
        btrfs_free_path(path);
        return ret;
}

/*
 * Called from commit_transaction.  Writes all changed device stats to
 * disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *dev_root = fs_info->dev_root;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *device;
        int ret = 0;

        mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (!device->dev_stats_valid || !device->dev_stats_dirty)
                        continue;

                ret = update_dev_stat_item(trans, dev_root, device);
                if (!ret)
                        device->dev_stats_dirty = 0;
        }
        mutex_unlock(&fs_devices->device_list_mutex);

        return ret;
}

void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
        btrfs_dev_stat_inc(dev, index);
        btrfs_dev_stat_print_on_error(dev);
}

static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
        if (!dev->dev_stats_valid)
                return;
        printk_ratelimited_in_rcu(KERN_ERR
                           "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
                           rcu_str_deref(dev->name),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
                           btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
                           btrfs_dev_stat_read(dev,
                                               BTRFS_DEV_STAT_CORRUPTION_ERRS),
                           btrfs_dev_stat_read(dev,
                                               BTRFS_DEV_STAT_GENERATION_ERRS));
}

static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
        int i;

        for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                if (btrfs_dev_stat_read(dev, i) != 0)
                        break;
        if (i == BTRFS_DEV_STAT_VALUES_MAX)
                return; /* all values == 0, suppress message */

        printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
               rcu_str_deref(dev->name),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
               btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}

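/*
 * Copy the counters of the device named by @stats->devid into @stats
 * and, when BTRFS_DEV_STATS_RESET is set in @stats->flags, reset them
 * as they are read.
 */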
int btrfs_get_dev_stats(struct btrfs_root *root,
                        struct btrfs_ioctl_get_dev_stats *stats)
{
        struct btrfs_device *dev;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        int i;

        mutex_lock(&fs_devices->device_list_mutex);
        dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
        mutex_unlock(&fs_devices->device_list_mutex);

        if (!dev) {
                printk(KERN_WARNING
                       "btrfs: get dev_stats failed, device not found\n");
                return -ENODEV;
        } else if (!dev->dev_stats_valid) {
                printk(KERN_WARNING
                       "btrfs: get dev_stats failed, not yet valid\n");
                return -ENODEV;
        } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
                        if (stats->nr_items > i)
                                stats->values[i] =
                                        btrfs_dev_stat_read_and_reset(dev, i);
                        else
                                btrfs_dev_stat_reset(dev, i);
                }
        } else {
                for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                        if (stats->nr_items > i)
                                stats->values[i] = btrfs_dev_stat_read(dev, i);
        }
        if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
                stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
        return 0;
}

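/*
 * Zero the magic of the super block on @device so that the device is no
 * longer recognized as part of a btrfs filesystem.
 */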
int btrfs_scratch_superblock(struct btrfs_device *device)
{
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;

        bh = btrfs_read_dev_super(device->bdev);
        if (!bh)
                return -EINVAL;
        disk_super = (struct btrfs_super_block *)bh->b_data;

        memset(&disk_super->magic, 0, sizeof(disk_super->magic));
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
        brelse(bh);

        return 0;
}