/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
                                             struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        int error;
};

/*
 * Lockdep class keys for extent_buffer->lock in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked, thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering a lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_QUOTA_TREE_OBJECTID,      .name_stem = "quota"    },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif
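
/*
 * Illustrative sketch (not part of the upstream code): given the keysets
 * above, a root-tree block at level 2 ends up with the lockdep class name
 * "btrfs-root-02", and a block from a root not listed in the table falls
 * through to the default "tree" stem.  A hypothetical helper showing the
 * same lookup btrfs_set_buffer_lockdep_class() performs:
 */
#if 0
static const char *example_lockdep_name(u64 objectid, int level)
{
        struct btrfs_lockdep_keyset *ks;

        /* id 0 terminates the table and doubles as the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;
        return ks->names[level];        /* e.g. "btrfs-root-02" */
}
#endif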

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em, 0);
        if (ret == -EEXIST) {
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em)
                        em = ERR_PTR(-EIO);
        } else if (ret) {
                free_extent_map(em);
                em = ERR_PTR(ret);
        }
        write_unlock(&em_tree->lock);

out:
        return em;
}

u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
        return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}
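
/*
 * Illustrative sketch (assumption, not part of the upstream code): the two
 * helpers above pair up to produce the little-endian CRC32C that btrfs
 * stores on disk.  For a flat buffer the whole sequence is simply:
 */
#if 0
static void example_csum_buffer(char *data, size_t len, char *result)
{
        u32 crc = ~(u32)0;      /* same seed csum_tree_block() starts from */

        crc = btrfs_csum_data(data, crc, len);
        btrfs_csum_final(crc, result);  /* stores ~crc as a le32 */
}
#endif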

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO
                                "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
                                "level %d\n",
                                root->fs_info->sb->s_id, buf->start,
                                val, found, btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;
        bool need_lock = (current->journal_info ==
                          (void *)BTRFS_SEND_TRANS_STUB);

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        if (need_lock) {
                btrfs_tree_read_lock(eb);
                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
        }

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       eb->start, parent_transid, btrfs_header_generation(eb));
        ret = 1;

        /*
         * Things reading via commit roots that don't have normal protection,
         * like send, can have a really old block in cache that may point at a
         * block that has been freed and re-allocated.  So don't clear uptodate
         * if we find an eb that is under IO (dirty/writeback) because we could
         * end up reading in the stale data and then writing it back out and
         * making everybody very sad.
         */
        if (!extent_buffer_under_io(eb))
                clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        /* only drop the tree read lock if we took it above */
        if (need_lock)
                btrfs_tree_read_unlock_blocking(eb);
        return ret;
}
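
/*
 * Worked example (illustrative): if the parent block points here with
 * transid 100 but this block's header says generation 97, the write that
 * should have happened at transid 100 never reached the disk (or landed
 * in the wrong place), so the caller must retry the read from another
 * mirror rather than trust this copy.
 */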

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
static int btrfs_check_super_csum(char *raw_disk_sb)
{
        struct btrfs_super_block *disk_sb =
                (struct btrfs_super_block *)raw_disk_sb;
        u16 csum_type = btrfs_super_csum_type(disk_sb);
        int ret = 0;

        if (csum_type == BTRFS_CSUM_TYPE_CRC32) {
                u32 crc = ~(u32)0;
                const int csum_size = sizeof(crc);
                char result[csum_size];

                /*
                 * The super_block structure does not span the whole
                 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
                 * is filled with zeros and is included in the checksum.
                 */
                crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
                                crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, result);

                if (memcmp(raw_disk_sb, result, csum_size))
                        ret = 1;

                if (ret && btrfs_super_generation(disk_sb) < 10) {
                        printk(KERN_WARNING
                                "BTRFS: super block crcs don't match, older mkfs detected\n");
                        ret = 0;
                }
        }

        if (csum_type >= ARRAY_SIZE(btrfs_csum_sizes)) {
                printk(KERN_ERR "BTRFS: unsupported checksum algorithm %u\n",
                                csum_type);
                ret = 1;
        }

        return ret;
}
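
/*
 * Layout note (illustrative): the stored checksum occupies the first
 * BTRFS_CSUM_SIZE bytes of the superblock, and the CRC above covers the
 * remaining BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE bytes.  A hypothetical
 * caller pattern after reading the raw superblock from disk:
 */
#if 0
        if (btrfs_check_super_csum(raw_disk_sb)) {
                /* bad csum or unsupported algorithm: don't trust the buffer */
                return -EINVAL;
        }
        /* safe to interpret raw_disk_sb as a struct btrfs_super_block now */
#endif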

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret) {
                        if (!verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
                                break;
                        else
                                ret = -EIO;
                }

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        break;

                num_copies = btrfs_num_copies(root->fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(root, eb, failed_mirror);

        return ret;
}
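
/*
 * Worked example (illustrative): with num_copies == 3 and the initial read
 * failing on mirror 2, the loop above retries mirror_num 1, skips 2 (the
 * mirror that already failed), then tries 3; once mirror_num exceeds
 * num_copies it gives up, and a successful retry triggers a repair of the
 * failed mirror.
 */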

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */

static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        u64 start = page_offset(page);
        u64 found_start;
        struct extent_buffer *eb;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
        found_start = btrfs_header_bytenr(eb);
        if (WARN_ON(found_start != start || !PageUptodate(page)))
                return 0;
        csum_tree_block(root, eb, 0);
        return 0;
}

static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

#define CORRUPT(reason, eb, root, slot)                         \
        btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu,"       \
                   "root=%llu, slot=%d", reason,                        \
               btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems-1
         * because we check the current slot against the next slot, which
         * verifies the next slot's offset+size makes sense and that the
         * current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offsets and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
                 * front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the leaf,
                 * just in case all the items are consistent with each other,
                 * but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}
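
/*
 * Leaf layout reminder for the checks above (illustrative):
 *
 *   [ header | item 0 | item 1 | ... free space ... | data 1 | data 0 ]
 *              items grow to the right --->  <--- item data grows left
 *
 * so item N's data must begin exactly where item N+1's data ends, which is
 * what the offset/end comparison in check_leaf() verifies.
 */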

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
                                      u64 phy_offset, struct page *page,
                                      u64 start, u64 end, int mirror)
{
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        eb = (struct extent_buffer *)page->private;

        /* the pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all these other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                printk_ratelimited(KERN_INFO "BTRFS: bad tree block start "
                               "%llu %llu\n",
                               found_start, eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "BTRFS: bad fsid on block %llu\n",
                               eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
                btrfs_info(root->fs_info, "bad tree block level %d",
                           (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
        }

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (reads_done &&
            test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, ret);

        if (ret) {
                /*
                 * our io error hook is going to dec the io pages
                 * again, we have to make sure it has something
                 * to decrement
                 */
                atomic_inc(&eb->io_pages);
                clear_extent_buffer_uptodate(eb);
        }
        free_extent_buffer(eb);
out:
        return ret;
}

static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
        atomic_dec(&eb->io_pages);
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, -EIO);
        return -EIO;    /* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
                        btrfs_queue_work(fs_info->endio_meta_write_workers,
                                         &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
                        btrfs_queue_work(fs_info->endio_freespace_worker,
                                         &end_io_wq->work);
                else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_work(fs_info->endio_raid56_workers,
                                         &end_io_wq->work);
                else
                        btrfs_queue_work(fs_info->endio_write_workers,
                                         &end_io_wq->work);
        } else {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
                        btrfs_queue_work(fs_info->endio_raid56_workers,
                                         &end_io_wq->work);
                else if (end_io_wq->metadata)
                        btrfs_queue_work(fs_info->endio_meta_workers,
                                         &end_io_wq->work);
                else
                        btrfs_queue_work(fs_info->endio_workers,
                                         &end_io_wq->work);
        }
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 * 3 - raid parity work
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
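
/*
 * Illustrative sketch (assumption): how a metadata read hooks itself up
 * before submission, so end_workqueue_bio() can defer the real completion
 * handler to the endio worker threads.  This mirrors what
 * btree_submit_bio_hook() below does for btree reads.
 */
#if 0
        ret = btrfs_bio_wq_end_io(fs_info, bio, 1 /* normal metadata */);
        if (ret)
                return ret;
        ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
#endif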

unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->thread_pool_size,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}
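
/*
 * Worked example (illustrative): with thread_pool_size == 8 and 4 open
 * devices, the limit is 256 * min(8, 4) == 1024 in-flight async submits;
 * run_one_async_done() below wakes throttled submitters once the count
 * drops under 2/3 of that (682).
 */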

static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        int ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
                async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->error) {
                bio_endio(async->bio, async->error);
                return;
        }

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        btrfs_init_work(&async->work, run_one_async_start,
                        run_one_async_done, run_one_async_free);

        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        async->error = 0;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_priority(&async->work);

        btrfs_queue_work(fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
               atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}

static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct btrfs_root *root;
        int i, ret = 0;

        bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
        }

        return ret;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
        if (ret)
                bio_endio(bio, ret);
        return ret;
}

static int check_async_write(struct inode *inode, unsigned long bio_flags)
{
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
#ifdef CONFIG_X86
        if (cpu_has_xmm4_2)
                return 0;
#endif
        return 1;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int async = check_async_write(inode, bio_flags);
        int ret;

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else if (!async) {
                ret = btree_csum_one_bio(bio);
                if (ret)
                        goto out_w_error;
                ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                    mirror_num, 0);
        } else {
                /*
                 * kthread helpers are used to submit writes so that
                 * checksumming can happen in parallel across all CPUs
                 */
                ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                          inode, rw, bio, mirror_num, 0,
                                          bio_offset,
                                          __btree_submit_bio_start,
                                          __btree_submit_bio_done);
        }

        if (ret) {
out_w_error:
                bio_endio(bio, ret);
        }
        return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif


static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct btrfs_fs_info *fs_info;
        int ret;

        if (wbc->sync_mode == WB_SYNC_NONE) {

                if (wbc->for_kupdate)
                        return 0;

                fs_info = BTRFS_I(mapping->host)->root->fs_info;
                /* this is a bit racy, but that's ok */
                ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
                                             BTRFS_DIRTY_METADATA_THRESH);
                if (ret < 0)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;

        return try_release_extent_buffer(page);
}

static void btree_invalidatepage(struct page *page, unsigned int offset,
                                 unsigned int length)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
                           "page private not zero on page %llu",
                           (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
#endif
        return __set_page_dirty_nobuffers(page);
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
                                       btree_get_extent, mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        return find_extent_buffer(root->fs_info, bytenr);
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
}


int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->pages[0]->mapping,
                                       buf->start, buf->start + buf->len - 1);
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret) {
                free_extent_buffer(buf);
                return NULL;
        }
        return buf;

}

void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct extent_buffer *buf)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        __percpu_counter_add(&fs_info->dirty_metadata_bytes,
                                             -buf->len,
                                             fs_info->dirty_metadata_batch);
                        /* ugh, clear_extent_buffer_dirty needs to lock the page */
                        btrfs_set_lock_blocking(buf);
                        clear_extent_buffer_dirty(buf);
                }
        }
}

static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
{
        struct btrfs_subvolume_writers *writers;
        int ret;

        writers = kmalloc(sizeof(*writers), GFP_NOFS);
        if (!writers)
                return ERR_PTR(-ENOMEM);

        ret = percpu_counter_init(&writers->counter, 0);
        if (ret < 0) {
                kfree(writers);
                return ERR_PTR(ret);
        }

        init_waitqueue_head(&writers->wait);
        return writers;
}

static void
btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
{
        percpu_counter_destroy(&writers->counter);
        kfree(writers);
}

static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                         u32 stripesize, struct btrfs_root *root,
                         struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->nr_delalloc_inodes = 0;
        root->nr_ordered_extents = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        INIT_LIST_HEAD(&root->delalloc_inodes);
        INIT_LIST_HEAD(&root->delalloc_root);
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->logged_list[0]);
        INIT_LIST_HEAD(&root->logged_list[1]);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
        spin_lock_init(&root->log_extents_lock[0]);
        spin_lock_init(&root->log_extents_lock[1]);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        mutex_init(&root->ordered_extent_mutex);
        mutex_init(&root->delalloc_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        INIT_LIST_HEAD(&root->log_ctxs[0]);
        INIT_LIST_HEAD(&root->log_ctxs[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->log_batch, 0);
        atomic_set(&root->orphan_inodes, 0);
        atomic_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshoted, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
        if (fs_info)
                extent_io_tree_init(&root->dirty_log_pages,
                                     fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        if (fs_info)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;

        spin_lock_init(&root->root_item_lock);
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
        if (root)
                root->fs_info = fs_info;
        return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(void)
{
        struct btrfs_root *root;

        root = btrfs_alloc_root(NULL);
        if (!root)
                return ERR_PTR(-ENOMEM);
        __setup_root(4096, 4096, 4096, 4096, root, NULL, 1);
        root->dummy_root = 1;

        return root;
}
#endif

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info,
                                     u64 objectid)
{
        struct extent_buffer *leaf;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root;
        struct btrfs_key key;
        int ret = 0;
        uuid_le uuid;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        root->root_key.objectid = objectid;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
                                      0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                leaf = NULL;
                goto fail;
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, objectid);
        root->node = leaf;

        write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
                            BTRFS_FSID_SIZE);
        write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
                            btrfs_header_chunk_tree_uuid(leaf),
                            BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        root->commit_root = btrfs_root_node(root);
        root->track_dirty = 1;


        root->root_item.flags = 0;
        root->root_item.byte_limit = 0;
        btrfs_set_root_bytenr(&root->root_item, leaf->start);
        btrfs_set_root_generation(&root->root_item, trans->transid);
        btrfs_set_root_level(&root->root_item, 0);
        btrfs_set_root_refs(&root->root_item, 1);
        btrfs_set_root_used(&root->root_item, leaf->len);
        btrfs_set_root_last_snapshot(&root->root_item, 0);
        btrfs_set_root_dirid(&root->root_item, 0);
        uuid_le_gen(&uuid);
        memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
        root->root_item.drop_level = 0;

        key.objectid = objectid;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
        if (ret)
                goto fail;

        btrfs_tree_unlock(leaf);

        return root;

fail:
        if (leaf) {
                btrfs_tree_unlock(leaf);
                free_extent_buffer(leaf);
        }
        kfree(root);

        return ERR_PTR(ret);
}
1380
1381 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1382                                          struct btrfs_fs_info *fs_info)
1383 {
1384         struct btrfs_root *root;
1385         struct btrfs_root *tree_root = fs_info->tree_root;
1386         struct extent_buffer *leaf;
1387
1388         root = btrfs_alloc_root(fs_info);
1389         if (!root)
1390                 return ERR_PTR(-ENOMEM);
1391
1392         __setup_root(tree_root->nodesize, tree_root->leafsize,
1393                      tree_root->sectorsize, tree_root->stripesize,
1394                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1395
1396         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1397         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1398         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1399         /*
1400          * log trees do not get reference counted because they go away
1401          * before a real commit is actually done.  They do store pointers
1402          * to file data extents, and those reference counts still get
1403          * updated (along with back refs to the log tree).
1404          */
1405         root->ref_cows = 0;
1406
1407         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1408                                       BTRFS_TREE_LOG_OBJECTID, NULL,
1409                                       0, 0, 0);
1410         if (IS_ERR(leaf)) {
1411                 kfree(root);
1412                 return ERR_CAST(leaf);
1413         }
1414
1415         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1416         btrfs_set_header_bytenr(leaf, leaf->start);
1417         btrfs_set_header_generation(leaf, trans->transid);
1418         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1419         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1420         root->node = leaf;
1421
1422         write_extent_buffer(root->node, root->fs_info->fsid,
1423                             btrfs_header_fsid(), BTRFS_FSID_SIZE);
1424         btrfs_mark_buffer_dirty(root->node);
1425         btrfs_tree_unlock(root->node);
1426         return root;
1427 }
1428
1429 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1430                              struct btrfs_fs_info *fs_info)
1431 {
1432         struct btrfs_root *log_root;
1433
1434         log_root = alloc_log_tree(trans, fs_info);
1435         if (IS_ERR(log_root))
1436                 return PTR_ERR(log_root);
1437         WARN_ON(fs_info->log_root_tree);
1438         fs_info->log_root_tree = log_root;
1439         return 0;
1440 }
1441
1442 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1443                        struct btrfs_root *root)
1444 {
1445         struct btrfs_root *log_root;
1446         struct btrfs_inode_item *inode_item;
1447
1448         log_root = alloc_log_tree(trans, root->fs_info);
1449         if (IS_ERR(log_root))
1450                 return PTR_ERR(log_root);
1451
1452         log_root->last_trans = trans->transid;
1453         log_root->root_key.offset = root->root_key.objectid;
1454
1455         inode_item = &log_root->root_item.inode;
1456         btrfs_set_stack_inode_generation(inode_item, 1);
1457         btrfs_set_stack_inode_size(inode_item, 3);
1458         btrfs_set_stack_inode_nlink(inode_item, 1);
1459         btrfs_set_stack_inode_nbytes(inode_item, root->leafsize);
1460         btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
1461
1462         btrfs_set_root_node(&log_root->root_item, log_root->node);
1463
1464         WARN_ON(root->log_root);
1465         root->log_root = log_root;
1466         root->log_transid = 0;
1467         root->log_transid_committed = -1;
1468         root->last_log_commit = 0;
1469         return 0;
1470 }
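/*
 * The stack inode item filled in above describes the log root as a
 * small empty directory: generation 1, size 3, nlink 1, one leaf worth
 * of nbytes, mode S_IFDIR | 0755.  log_transid starts at 0 with
 * log_transid_committed at -1, meaning no log transaction has been
 * committed for this subvolume yet.
 */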
1471
1472 static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
1473                                                struct btrfs_key *key)
1474 {
1475         struct btrfs_root *root;
1476         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1477         struct btrfs_path *path;
1478         u64 generation;
1479         u32 blocksize;
1480         int ret;
1481
1482         path = btrfs_alloc_path();
1483         if (!path)
1484                 return ERR_PTR(-ENOMEM);
1485
1486         root = btrfs_alloc_root(fs_info);
1487         if (!root) {
1488                 ret = -ENOMEM;
1489                 goto alloc_fail;
1490         }
1491
1492         __setup_root(tree_root->nodesize, tree_root->leafsize,
1493                      tree_root->sectorsize, tree_root->stripesize,
1494                      root, fs_info, key->objectid);
1495
1496         ret = btrfs_find_root(tree_root, key, path,
1497                               &root->root_item, &root->root_key);
1498         if (ret) {
1499                 if (ret > 0)
1500                         ret = -ENOENT;
1501                 goto find_fail;
1502         }
1503
1504         generation = btrfs_root_generation(&root->root_item);
1505         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1506         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1507                                      blocksize, generation);
1508         if (!root->node) {
1509                 ret = -ENOMEM;
1510                 goto find_fail;
1511         } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
1512                 ret = -EIO;
1513                 goto read_fail;
1514         }
1515         root->commit_root = btrfs_root_node(root);
1516 out:
1517         btrfs_free_path(path);
1518         return root;
1519
1520 read_fail:
1521         free_extent_buffer(root->node);
1522 find_fail:
1523         kfree(root);
1524 alloc_fail:
1525         root = ERR_PTR(ret);
1526         goto out;
1527 }
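/*
 * The error unwinding above is ordered by how far setup got: read_fail
 * drops the half-read extent buffer, find_fail frees the root struct,
 * and alloc_fail falls through to "out", which frees the path needed on
 * every exit.  Success and failure share the same single return point.
 */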
1528
1529 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
1530                                       struct btrfs_key *location)
1531 {
1532         struct btrfs_root *root;
1533
1534         root = btrfs_read_tree_root(tree_root, location);
1535         if (IS_ERR(root))
1536                 return root;
1537
1538         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
1539                 root->ref_cows = 1;
1540                 btrfs_check_and_init_root_item(&root->root_item);
1541         }
1542
1543         return root;
1544 }
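/*
 * Every root read through here except the log trees gets ref_cows = 1,
 * i.e. its blocks are reference counted and may be shared by snapshots,
 * and btrfs_check_and_init_root_item() fills in any fields that root
 * items written by older kernels may lack.
 */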
1545
1546 int btrfs_init_fs_root(struct btrfs_root *root)
1547 {
1548         int ret;
1549         struct btrfs_subvolume_writers *writers;
1550
1551         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1552         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1553                                         GFP_NOFS);
1554         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1555                 ret = -ENOMEM;
1556                 goto fail;
1557         }
1558
1559         writers = btrfs_alloc_subvolume_writers();
1560         if (IS_ERR(writers)) {
1561                 ret = PTR_ERR(writers);
1562                 goto fail;
1563         }
1564         root->subv_writers = writers;
1565
1566         btrfs_init_free_ino_ctl(root);
1567         spin_lock_init(&root->cache_lock);
1568         init_waitqueue_head(&root->cache_wait);
1569
1570         ret = get_anon_bdev(&root->anon_dev);
1571         if (ret)
1572                 goto free_writers;
1573         return 0;
1574
1575 free_writers:
1576         btrfs_free_subvolume_writers(root->subv_writers);
1577 fail:
1578         kfree(root->free_ino_ctl);
1579         kfree(root->free_ino_pinned);
1580         return ret;
1581 }
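/*
 * Note that the "fail" label above can be reached with only one of the
 * two free-inode caches allocated; that is safe because kfree(NULL) is
 * a no-op.  The anon_dev handed out by get_anon_bdev() is the st_dev
 * value user space sees for files in this subvolume.
 */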
1582
1583 static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1584                                                u64 root_id)
1585 {
1586         struct btrfs_root *root;
1587
1588         spin_lock(&fs_info->fs_roots_radix_lock);
1589         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1590                                  (unsigned long)root_id);
1591         spin_unlock(&fs_info->fs_roots_radix_lock);
1592         return root;
1593 }
1594
1595 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
1596                          struct btrfs_root *root)
1597 {
1598         int ret;
1599
1600         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1601         if (ret)
1602                 return ret;
1603
1604         spin_lock(&fs_info->fs_roots_radix_lock);
1605         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1606                                 (unsigned long)root->root_key.objectid,
1607                                 root);
1608         if (ret == 0)
1609                 root->in_radix = 1;
1610         spin_unlock(&fs_info->fs_roots_radix_lock);
1611         radix_tree_preload_end();
1612
1613         return ret;
1614 }
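/*
 * btrfs_insert_fs_root() uses the standard preload/insert idiom for
 * populating a radix tree under a spinlock: radix_tree_preload()
 * preallocates any internal nodes with GFP_NOFS while sleeping is still
 * allowed (and disables preemption on success), so the insertion itself
 * cannot fail with -ENOMEM inside the lock.  The generic shape of the
 * idiom, as a sketch (the names here are placeholders, not btrfs code):
 *
 *	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
 *	if (ret)
 *		return ret;
 *	spin_lock(&lock);
 *	ret = radix_tree_insert(&tree, index, item);
 *	spin_unlock(&lock);
 *	radix_tree_preload_end();
 *
 * -EEXIST from radix_tree_insert() means another task inserted the same
 * root first; btrfs_get_fs_root() below handles that by freeing its
 * copy and retrying the lookup.
 */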
1615
1616 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
1617                                      struct btrfs_key *location,
1618                                      bool check_ref)
1619 {
1620         struct btrfs_root *root;
1621         int ret;
1622
1623         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1624                 return fs_info->tree_root;
1625         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1626                 return fs_info->extent_root;
1627         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1628                 return fs_info->chunk_root;
1629         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1630                 return fs_info->dev_root;
1631         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1632                 return fs_info->csum_root;
1633         if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
1634                 return fs_info->quota_root ? fs_info->quota_root :
1635                                              ERR_PTR(-ENOENT);
1636         if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
1637                 return fs_info->uuid_root ? fs_info->uuid_root :
1638                                             ERR_PTR(-ENOENT);
1639 again:
1640         root = btrfs_lookup_fs_root(fs_info, location->objectid);
1641         if (root) {
1642                 if (check_ref && btrfs_root_refs(&root->root_item) == 0)
1643                         return ERR_PTR(-ENOENT);
1644                 return root;
1645         }
1646
1647         root = btrfs_read_fs_root(fs_info->tree_root, location);
1648         if (IS_ERR(root))
1649                 return root;
1650
1651         if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
1652                 ret = -ENOENT;
1653                 goto fail;
1654         }
1655
1656         ret = btrfs_init_fs_root(root);
1657         if (ret)
1658                 goto fail;
1659
1660         ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
1661                         location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
1662         if (ret < 0)
1663                 goto fail;
1664         if (ret == 0)
1665                 root->orphan_item_inserted = 1;
1666
1667         ret = btrfs_insert_fs_root(fs_info, root);
1668         if (ret) {
1669                 if (ret == -EEXIST) {
1670                         free_fs_root(root);
1671                         goto again;
1672                 }
1673                 goto fail;
1674         }
1675         return root;
1676 fail:
1677         free_fs_root(root);
1678         return ERR_PTR(ret);
1679 }
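/*
 * btrfs_get_fs_root() is the lookup-or-load path for roots: the global
 * trees (root, extent, chunk, dev, csum, quota, uuid) are returned
 * straight from fs_info, everything else is first looked up in the
 * radix tree and, on a miss, read from disk, initialized, checked for a
 * pending orphan item and then inserted.  Losing the insertion race
 * (-EEXIST) simply frees the duplicate and jumps back to "again".
 */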
1680
1681 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1682 {
1683         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1684         int ret = 0;
1685         struct btrfs_device *device;
1686         struct backing_dev_info *bdi;
1687
1688         rcu_read_lock();
1689         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1690                 if (!device->bdev)
1691                         continue;
1692                 bdi = blk_get_backing_dev_info(device->bdev);
1693                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1694                         ret = 1;
1695                         break;
1696                 }
1697         }
1698         rcu_read_unlock();
1699         return ret;
1700 }
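/*
 * btrfs_congested_fn() is the bdi congested callback: it walks the
 * device list under rcu_read_lock() (devices can be added or replaced
 * concurrently) and reports the filesystem congested as soon as any one
 * backing device is.
 */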
1701
1702 /*
1703  * If this fails, the caller must call bdi_destroy() to get rid of the
1704  * bdi again.
1705  */
1706 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1707 {
1708         int err;
1709
1710         bdi->capabilities = BDI_CAP_MAP_COPY;
1711         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1712         if (err)
1713                 return err;
1714
1715         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1716         bdi->congested_fn       = btrfs_congested_fn;
1717         bdi->congested_data     = info;
1718         return 0;
1719 }
1720
1721 /*
1722  * called by the kthread helper functions to finally call the bio end_io
1723  * functions.  This is where read checksum verification actually happens.
1724  */
1725 static void end_workqueue_fn(struct btrfs_work *work)
1726 {
1727         struct bio *bio;
1728         struct end_io_wq *end_io_wq;
1729         int error;
1730
1731         end_io_wq = container_of(work, struct end_io_wq, work);
1732         bio = end_io_wq->bio;
1733
1734         error = end_io_wq->error;
1735         bio->bi_private = end_io_wq->private;
1736         bio->bi_end_io = end_io_wq->end_io;
1737         kfree(end_io_wq);
1738         bio_endio_nodec(bio, error);
1739 }
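/*
 * The bio's bi_private and bi_end_io were repointed at the end_io_wq
 * bookkeeping when it was submitted; end_workqueue_fn() restores the
 * saved originals before completing the bio, so the real completion
 * handler runs in task context where it may sleep.
 */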
1740
1741 static int cleaner_kthread(void *arg)
1742 {
1743         struct btrfs_root *root = arg;
1744         int again;
1745
1746         do {
1747                 again = 0;
1748
1749                 /* Make the cleaner go to sleep early. */
1750                 if (btrfs_need_cleaner_sleep(root))
1751                         goto sleep;
1752
1753                 if (!mutex_trylock(&root->fs_info->cleaner_mutex))
1754                         goto sleep;
1755
1756                 /*
1757                  * Recheck after taking the lock: the status of the fs may
1758                  * have changed between the check above and the trylock.
1759                  */
1760                 if (btrfs_need_cleaner_sleep(root)) {
1761                         mutex_unlock(&root->fs_info->cleaner_mutex);
1762                         goto sleep;
1763                 }
1764
1765                 btrfs_run_delayed_iputs(root);
1766                 again = btrfs_clean_one_deleted_snapshot(root);
1767                 mutex_unlock(&root->fs_info->cleaner_mutex);
1768
1769                 /*
1770                  * The defragger has dealt with the R/O remount and umount,
1771                  * so we needn't do anything special here.
1772                  */
1773                 btrfs_run_defrag_inodes(root->fs_info);
1774 sleep:
1775                 if (!try_to_freeze() && !again) {
1776                         set_current_state(TASK_INTERRUPTIBLE);
1777                         if (!kthread_should_stop())
1778                                 schedule();
1779                         __set_current_state(TASK_RUNNING);
1780                 }
1781         } while (!kthread_should_stop());
1782         return 0;
1783 }
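/*
 * The sleep logic in cleaner_kthread() follows the canonical kthread
 * pattern: mark the task TASK_INTERRUPTIBLE *before* re-checking the
 * stop condition, so a wake_up() racing with the check cannot be lost.
 * A minimal sketch of the idiom (assuming no freezer interaction):
 *
 *	while (!kthread_should_stop()) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (!kthread_should_stop())
 *			schedule();
 *		__set_current_state(TASK_RUNNING);
 *	}
 */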
1784
1785 static int transaction_kthread(void *arg)
1786 {
1787         struct btrfs_root *root = arg;
1788         struct btrfs_trans_handle *trans;
1789         struct btrfs_transaction *cur;
1790         u64 transid;
1791         unsigned long now;
1792         unsigned long delay;
1793         bool cannot_commit;
1794
1795         do {
1796                 cannot_commit = false;
1797                 delay = HZ * root->fs_info->commit_interval;
1798                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1799
1800                 spin_lock(&root->fs_info->trans_lock);
1801                 cur = root->fs_info->running_transaction;
1802                 if (!cur) {
1803                         spin_unlock(&root->fs_info->trans_lock);
1804                         goto sleep;
1805                 }
1806
1807                 now = get_seconds();
1808                 if (cur->state < TRANS_STATE_BLOCKED &&
1809                     (now < cur->start_time ||
1810                      now - cur->start_time < root->fs_info->commit_interval)) {
1811                         spin_unlock(&root->fs_info->trans_lock);
1812                         delay = HZ * 5;
1813                         goto sleep;
1814                 }
1815                 transid = cur->transid;
1816                 spin_unlock(&root->fs_info->trans_lock);
1817
1818                 /* If the file system is aborted, this will always fail. */
1819                 trans = btrfs_attach_transaction(root);
1820                 if (IS_ERR(trans)) {
1821                         if (PTR_ERR(trans) != -ENOENT)
1822                                 cannot_commit = true;
1823                         goto sleep;
1824                 }
1825                 if (transid == trans->transid) {
1826                         btrfs_commit_transaction(trans, root);
1827                 } else {
1828                         btrfs_end_transaction(trans, root);
1829                 }
1830 sleep:
1831                 wake_up_process(root->fs_info->cleaner_kthread);
1832                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1833
1834                 if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
1835                                       &root->fs_info->fs_state)))
1836                         btrfs_cleanup_transaction(root);
1837                 if (!try_to_freeze()) {
1838                         set_current_state(TASK_INTERRUPTIBLE);
1839                         if (!kthread_should_stop() &&
1840                             (!btrfs_transaction_blocked(root->fs_info) ||
1841                              cannot_commit))
1842                                 schedule_timeout(delay);
1843                         __set_current_state(TASK_RUNNING);
1844                 }
1845         } while (!kthread_should_stop());
1846         return 0;
1847 }
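/*
 * transaction_kthread() is the periodic committer: once the running
 * transaction is older than commit_interval (or has gone blocked) it
 * attaches to it and commits; otherwise it naps for five seconds and
 * looks again.  btrfs_attach_transaction() never starts a transaction
 * of its own, so -ENOENT here just means nothing was running.  The
 * transid comparison guards against committing a newer transaction than
 * the one sampled under trans_lock.
 */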
1848
1849 /*
1850  * this will find the highest generation in the array of
1851  * root backups.  The index of the newest entry is returned,
1852  * or -1 if we can't find anything.
1853  *
1854  * We check to make sure the array is valid by comparing the
1855  * generation of the latest root in the array with the generation
1856  * in the super block.  If they don't match we discard the array.
1857  */
1858 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1859 {
1860         u64 cur;
1861         int newest_index = -1;
1862         struct btrfs_root_backup *root_backup;
1863         int i;
1864
1865         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1866                 root_backup = info->super_copy->super_roots + i;
1867                 cur = btrfs_backup_tree_root_gen(root_backup);
1868                 if (cur == newest_gen)
1869                         newest_index = i;
1870         }
1871
1872         /* check to see if we actually wrapped around */
1873         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1874                 root_backup = info->super_copy->super_roots;
1875                 cur = btrfs_backup_tree_root_gen(root_backup);
1876                 if (cur == newest_gen)
1877                         newest_index = 0;
1878         }
1879         return newest_index;
1880 }
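/*
 * The extra check above handles the ring wrap-around: the loop remembers
 * the *last* matching slot, so if the final slot matched, slot 0 is
 * re-tested as well; if it carries the same generation it is the more
 * recent write (the ring wrapped) and index 0 is returned instead.
 */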
1881
1882
1883 /*
1884  * find the oldest backup so we know where to store new entries
1885  * in the backup array.  This will set the backup_root_index
1886  * field in the fs_info struct
1887  */
1888 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1889                                      u64 newest_gen)
1890 {
1891         int newest_index = -1;
1892
1893         newest_index = find_newest_super_backup(info, newest_gen);
1894         /* if there was garbage in there, just move along */
1895         if (newest_index == -1) {
1896                 info->backup_root_index = 0;
1897         } else {
1898                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1899         }
1900 }
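/*
 * The oldest slot is simply the one after the newest in ring order.
 * With BTRFS_NUM_BACKUP_ROOTS == 4, a newest index of 2 makes 3 the
 * next write target, and a newest index of 3 wraps back to 0.  Garbage
 * in the array (newest_index == -1) just restarts the ring at slot 0.
 */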
1901
1902 /*
1903  * copy all the root pointers into the super backup array.
1904  * this will bump the backup pointer by one when it is
1905  * done
1906  */
1907 static void backup_super_roots(struct btrfs_fs_info *info)
1908 {
1909         int next_backup;
1910         struct btrfs_root_backup *root_backup;
1911         int last_backup;
1912
1913         next_backup = info->backup_root_index;
1914         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1915                 BTRFS_NUM_BACKUP_ROOTS;
1916
1917         /*
1918          * just overwrite the last backup if we're at the same generation;
1919          * this happens only at umount
1920          */
1921         root_backup = info->super_for_commit->super_roots + last_backup;
1922         if (btrfs_backup_tree_root_gen(root_backup) ==
1923             btrfs_header_generation(info->tree_root->node))
1924                 next_backup = last_backup;
1925
1926         root_backup = info->super_for_commit->super_roots + next_backup;
1927
1928         /*
1929          * make sure all of our padding and empty slots get zero filled
1930          * regardless of which ones we use today
1931          */
1932         memset(root_backup, 0, sizeof(*root_backup));
1933
1934         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1935
1936         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1937         btrfs_set_backup_tree_root_gen(root_backup,
1938                                btrfs_header_generation(info->tree_root->node));
1939
1940         btrfs_set_backup_tree_root_level(root_backup,
1941                                btrfs_header_level(info->tree_root->node));
1942
1943         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1944         btrfs_set_backup_chunk_root_gen(root_backup,
1945                                btrfs_header_generation(info->chunk_root->node));
1946         btrfs_set_backup_chunk_root_level(root_backup,
1947                                btrfs_header_level(info->chunk_root->node));
1948
1949         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1950         btrfs_set_backup_extent_root_gen(root_backup,
1951                                btrfs_header_generation(info->extent_root->node));
1952         btrfs_set_backup_extent_root_level(root_backup,
1953                                btrfs_header_level(info->extent_root->node));
1954
1955         /*
1956          * we might commit during log recovery, which happens before we set
1957          * the fs_root.  Make sure it is valid before we fill it in.
1958          */
1959         if (info->fs_root && info->fs_root->node) {
1960                 btrfs_set_backup_fs_root(root_backup,
1961                                          info->fs_root->node->start);
1962                 btrfs_set_backup_fs_root_gen(root_backup,
1963                                btrfs_header_generation(info->fs_root->node));
1964                 btrfs_set_backup_fs_root_level(root_backup,
1965                                btrfs_header_level(info->fs_root->node));
1966         }
1967
1968         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1969         btrfs_set_backup_dev_root_gen(root_backup,
1970                                btrfs_header_generation(info->dev_root->node));
1971         btrfs_set_backup_dev_root_level(root_backup,
1972                                        btrfs_header_level(info->dev_root->node));
1973
1974         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1975         btrfs_set_backup_csum_root_gen(root_backup,
1976                                btrfs_header_generation(info->csum_root->node));
1977         btrfs_set_backup_csum_root_level(root_backup,
1978                                btrfs_header_level(info->csum_root->node));
1979
1980         btrfs_set_backup_total_bytes(root_backup,
1981                              btrfs_super_total_bytes(info->super_copy));
1982         btrfs_set_backup_bytes_used(root_backup,
1983                              btrfs_super_bytes_used(info->super_copy));
1984         btrfs_set_backup_num_devices(root_backup,
1985                              btrfs_super_num_devices(info->super_copy));
1986
1987         /*
1988          * if we don't copy this out to the super_copy, it won't get remembered
1989          * for the next commit
1990          */
1991         memcpy(&info->super_copy->super_roots,
1992                &info->super_for_commit->super_roots,
1993                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1994 }
1995
1996 /*
1997  * this copies info out of the root backup array and back into
1998  * the in-memory super block.  It is meant to help iterate through
1999  * the array, so you send it the number of backups you've already
2000  * tried and the last backup index you used.
2001  *
2002  * this returns -1 when it has tried all the backups
2003  */
2004 static noinline int next_root_backup(struct btrfs_fs_info *info,
2005                                      struct btrfs_super_block *super,
2006                                      int *num_backups_tried, int *backup_index)
2007 {
2008         struct btrfs_root_backup *root_backup;
2009         int newest = *backup_index;
2010
2011         if (*num_backups_tried == 0) {
2012                 u64 gen = btrfs_super_generation(super);
2013
2014                 newest = find_newest_super_backup(info, gen);
2015                 if (newest == -1)
2016                         return -1;
2017
2018                 *backup_index = newest;
2019                 *num_backups_tried = 1;
2020         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
2021                 /* we've tried all the backups, all done */
2022                 return -1;
2023         } else {
2024                 /* jump to the next oldest backup */
2025                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
2026                         BTRFS_NUM_BACKUP_ROOTS;
2027                 *backup_index = newest;
2028                 *num_backups_tried += 1;
2029         }
2030         root_backup = super->super_roots + newest;
2031
2032         btrfs_set_super_generation(super,
2033                                    btrfs_backup_tree_root_gen(root_backup));
2034         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
2035         btrfs_set_super_root_level(super,
2036                                    btrfs_backup_tree_root_level(root_backup));
2037         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
2038
2039         /*
2040          * fixme: the total bytes and num_devices need to match, or we
2041          * should require a fsck
2042          */
2043         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
2044         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
2045         return 0;
2046 }
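/*
 * next_root_backup() rewrites the in-memory super block so the caller
 * can retry the mount against progressively older tree roots: the first
 * call picks the newest matching slot, each later call steps one slot
 * back in ring order, and -1 stops the walk once all
 * BTRFS_NUM_BACKUP_ROOTS slots have been tried.  open_ctree() drives
 * this from its retry_root_backup label further down.
 */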
2047
2048 /* helper to cleanup workers */
2049 static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
2050 {
2051         btrfs_destroy_workqueue(fs_info->fixup_workers);
2052         btrfs_destroy_workqueue(fs_info->delalloc_workers);
2053         btrfs_destroy_workqueue(fs_info->workers);
2054         btrfs_destroy_workqueue(fs_info->endio_workers);
2055         btrfs_destroy_workqueue(fs_info->endio_meta_workers);
2056         btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
2057         btrfs_destroy_workqueue(fs_info->rmw_workers);
2058         btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
2059         btrfs_destroy_workqueue(fs_info->endio_write_workers);
2060         btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
2061         btrfs_destroy_workqueue(fs_info->submit_workers);
2062         btrfs_destroy_workqueue(fs_info->delayed_workers);
2063         btrfs_destroy_workqueue(fs_info->caching_workers);
2064         btrfs_destroy_workqueue(fs_info->readahead_workers);
2065         btrfs_destroy_workqueue(fs_info->flush_workers);
2066         btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
2067 }
2068
2069 static void free_root_extent_buffers(struct btrfs_root *root)
2070 {
2071         if (root) {
2072                 free_extent_buffer(root->node);
2073                 free_extent_buffer(root->commit_root);
2074                 root->node = NULL;
2075                 root->commit_root = NULL;
2076         }
2077 }
2078
2079 /* helper to cleanup tree roots */
2080 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
2081 {
2082         free_root_extent_buffers(info->tree_root);
2083
2084         free_root_extent_buffers(info->dev_root);
2085         free_root_extent_buffers(info->extent_root);
2086         free_root_extent_buffers(info->csum_root);
2087         free_root_extent_buffers(info->quota_root);
2088         free_root_extent_buffers(info->uuid_root);
2089         if (chunk_root)
2090                 free_root_extent_buffers(info->chunk_root);
2091 }
2092
2093 static void del_fs_roots(struct btrfs_fs_info *fs_info)
2094 {
2095         int ret;
2096         struct btrfs_root *gang[8];
2097         int i;
2098
2099         while (!list_empty(&fs_info->dead_roots)) {
2100                 gang[0] = list_entry(fs_info->dead_roots.next,
2101                                      struct btrfs_root, root_list);
2102                 list_del(&gang[0]->root_list);
2103
2104                 if (gang[0]->in_radix) {
2105                         btrfs_drop_and_free_fs_root(fs_info, gang[0]);
2106                 } else {
2107                         free_extent_buffer(gang[0]->node);
2108                         free_extent_buffer(gang[0]->commit_root);
2109                         btrfs_put_fs_root(gang[0]);
2110                 }
2111         }
2112
2113         while (1) {
2114                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2115                                              (void **)gang, 0,
2116                                              ARRAY_SIZE(gang));
2117                 if (!ret)
2118                         break;
2119                 for (i = 0; i < ret; i++)
2120                         btrfs_drop_and_free_fs_root(fs_info, gang[i]);
2121         }
2122
2123         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
2124                 btrfs_free_log_root_tree(NULL, fs_info);
2125                 btrfs_destroy_pinned_extent(fs_info->tree_root,
2126                                             fs_info->pinned_extents);
2127         }
2128 }
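/*
 * del_fs_roots() drains the dead_roots list first and then sweeps
 * whatever is still in the radix tree, a gang of up to eight roots per
 * lookup.  Roots that never made it into the radix tree (in_radix == 0)
 * only need their extent buffers and the final reference dropped.
 */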
2129
2130 int open_ctree(struct super_block *sb,
2131                struct btrfs_fs_devices *fs_devices,
2132                char *options)
2133 {
2134         u32 sectorsize;
2135         u32 nodesize;
2136         u32 leafsize;
2137         u32 blocksize;
2138         u32 stripesize;
2139         u64 generation;
2140         u64 features;
2141         struct btrfs_key location;
2142         struct buffer_head *bh;
2143         struct btrfs_super_block *disk_super;
2144         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
2145         struct btrfs_root *tree_root;
2146         struct btrfs_root *extent_root;
2147         struct btrfs_root *csum_root;
2148         struct btrfs_root *chunk_root;
2149         struct btrfs_root *dev_root;
2150         struct btrfs_root *quota_root;
2151         struct btrfs_root *uuid_root;
2152         struct btrfs_root *log_tree_root;
2153         int ret;
2154         int err = -EINVAL;
2155         int num_backups_tried = 0;
2156         int backup_index = 0;
2157         int max_active;
2158         int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
2159         bool create_uuid_tree;
2160         bool check_uuid_tree;
2161
2162         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
2163         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
2164         if (!tree_root || !chunk_root) {
2165                 err = -ENOMEM;
2166                 goto fail;
2167         }
2168
2169         ret = init_srcu_struct(&fs_info->subvol_srcu);
2170         if (ret) {
2171                 err = ret;
2172                 goto fail;
2173         }
2174
2175         ret = setup_bdi(fs_info, &fs_info->bdi);
2176         if (ret) {
2177                 err = ret;
2178                 goto fail_srcu;
2179         }
2180
2181         ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2182         if (ret) {
2183                 err = ret;
2184                 goto fail_bdi;
2185         }
2186         fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2187                                         (1 + ilog2(nr_cpu_ids));
2188
2189         ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2190         if (ret) {
2191                 err = ret;
2192                 goto fail_dirty_metadata_bytes;
2193         }
2194
2195         ret = percpu_counter_init(&fs_info->bio_counter, 0);
2196         if (ret) {
2197                 err = ret;
2198                 goto fail_delalloc_bytes;
2199         }
2200
2201         fs_info->btree_inode = new_inode(sb);
2202         if (!fs_info->btree_inode) {
2203                 err = -ENOMEM;
2204                 goto fail_bio_counter;
2205         }
2206
2207         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
2208
2209         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2210         INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2211         INIT_LIST_HEAD(&fs_info->trans_list);
2212         INIT_LIST_HEAD(&fs_info->dead_roots);
2213         INIT_LIST_HEAD(&fs_info->delayed_iputs);
2214         INIT_LIST_HEAD(&fs_info->delalloc_roots);
2215         INIT_LIST_HEAD(&fs_info->caching_block_groups);
2216         spin_lock_init(&fs_info->delalloc_root_lock);
2217         spin_lock_init(&fs_info->trans_lock);
2218         spin_lock_init(&fs_info->fs_roots_radix_lock);
2219         spin_lock_init(&fs_info->delayed_iput_lock);
2220         spin_lock_init(&fs_info->defrag_inodes_lock);
2221         spin_lock_init(&fs_info->free_chunk_lock);
2222         spin_lock_init(&fs_info->tree_mod_seq_lock);
2223         spin_lock_init(&fs_info->super_lock);
2224         spin_lock_init(&fs_info->buffer_lock);
2225         rwlock_init(&fs_info->tree_mod_log_lock);
2226         mutex_init(&fs_info->reloc_mutex);
2227         mutex_init(&fs_info->delalloc_root_mutex);
2228         seqlock_init(&fs_info->profiles_lock);
2229
2230         init_completion(&fs_info->kobj_unregister);
2231         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
2232         INIT_LIST_HEAD(&fs_info->space_info);
2233         INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
2234         btrfs_mapping_init(&fs_info->mapping_tree);
2235         btrfs_init_block_rsv(&fs_info->global_block_rsv,
2236                              BTRFS_BLOCK_RSV_GLOBAL);
2237         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv,
2238                              BTRFS_BLOCK_RSV_DELALLOC);
2239         btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS);
2240         btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK);
2241         btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
2242         btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
2243                              BTRFS_BLOCK_RSV_DELOPS);
2244         atomic_set(&fs_info->nr_async_submits, 0);
2245         atomic_set(&fs_info->async_delalloc_pages, 0);
2246         atomic_set(&fs_info->async_submit_draining, 0);
2247         atomic_set(&fs_info->nr_async_bios, 0);
2248         atomic_set(&fs_info->defrag_running, 0);
2249         atomic64_set(&fs_info->tree_mod_seq, 0);
2250         fs_info->sb = sb;
2251         fs_info->max_inline = 8192 * 1024;
2252         fs_info->metadata_ratio = 0;
2253         fs_info->defrag_inodes = RB_ROOT;
2254         fs_info->free_chunk_space = 0;
2255         fs_info->tree_mod_log = RB_ROOT;
2256         fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2257         fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
2258         /* readahead state */
2259         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
2260         spin_lock_init(&fs_info->reada_lock);
2261
2262         fs_info->thread_pool_size = min_t(unsigned long,
2263                                           num_online_cpus() + 2, 8);
2264
2265         INIT_LIST_HEAD(&fs_info->ordered_roots);
2266         spin_lock_init(&fs_info->ordered_root_lock);
2267         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2268                                         GFP_NOFS);
2269         if (!fs_info->delayed_root) {
2270                 err = -ENOMEM;
2271                 goto fail_iput;
2272         }
2273         btrfs_init_delayed_root(fs_info->delayed_root);
2274
2275         mutex_init(&fs_info->scrub_lock);
2276         atomic_set(&fs_info->scrubs_running, 0);
2277         atomic_set(&fs_info->scrub_pause_req, 0);
2278         atomic_set(&fs_info->scrubs_paused, 0);
2279         atomic_set(&fs_info->scrub_cancel_req, 0);
2280         init_waitqueue_head(&fs_info->replace_wait);
2281         init_waitqueue_head(&fs_info->scrub_pause_wait);
2282         fs_info->scrub_workers_refcnt = 0;
2283 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2284         fs_info->check_integrity_print_mask = 0;
2285 #endif
2286
2287         spin_lock_init(&fs_info->balance_lock);
2288         mutex_init(&fs_info->balance_mutex);
2289         atomic_set(&fs_info->balance_running, 0);
2290         atomic_set(&fs_info->balance_pause_req, 0);
2291         atomic_set(&fs_info->balance_cancel_req, 0);
2292         fs_info->balance_ctl = NULL;
2293         init_waitqueue_head(&fs_info->balance_wait_q);
2294
2295         sb->s_blocksize = 4096;
2296         sb->s_blocksize_bits = blksize_bits(4096);
2297         sb->s_bdi = &fs_info->bdi;
2298
2299         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2300         set_nlink(fs_info->btree_inode, 1);
2301         /*
2302          * we set the i_size on the btree inode to the max possible offset.
2303          * the real end of the address space is determined by all of
2304          * the devices in the system
2305          */
2306         fs_info->btree_inode->i_size = OFFSET_MAX;
2307         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2308         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2309
2310         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2311         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2312                              fs_info->btree_inode->i_mapping);
2313         BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2314         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2315
2316         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2317
2318         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2319         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2320                sizeof(struct btrfs_key));
2321         set_bit(BTRFS_INODE_DUMMY,
2322                 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2323         btrfs_insert_inode_hash(fs_info->btree_inode);
2324
2325         spin_lock_init(&fs_info->block_group_cache_lock);
2326         fs_info->block_group_cache_tree = RB_ROOT;
2327         fs_info->first_logical_byte = (u64)-1;
2328
2329         extent_io_tree_init(&fs_info->freed_extents[0],
2330                              fs_info->btree_inode->i_mapping);
2331         extent_io_tree_init(&fs_info->freed_extents[1],
2332                              fs_info->btree_inode->i_mapping);
2333         fs_info->pinned_extents = &fs_info->freed_extents[0];
2334         fs_info->do_barriers = 1;
2335
2336
2337         mutex_init(&fs_info->ordered_operations_mutex);
2338         mutex_init(&fs_info->ordered_extent_flush_mutex);
2339         mutex_init(&fs_info->tree_log_mutex);
2340         mutex_init(&fs_info->chunk_mutex);
2341         mutex_init(&fs_info->transaction_kthread_mutex);
2342         mutex_init(&fs_info->cleaner_mutex);
2343         mutex_init(&fs_info->volume_mutex);
2344         init_rwsem(&fs_info->commit_root_sem);
2345         init_rwsem(&fs_info->cleanup_work_sem);
2346         init_rwsem(&fs_info->subvol_sem);
2347         sema_init(&fs_info->uuid_tree_rescan_sem, 1);
2348         fs_info->dev_replace.lock_owner = 0;
2349         atomic_set(&fs_info->dev_replace.nesting_level, 0);
2350         mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
2351         mutex_init(&fs_info->dev_replace.lock_management_lock);
2352         mutex_init(&fs_info->dev_replace.lock);
2353
2354         spin_lock_init(&fs_info->qgroup_lock);
2355         mutex_init(&fs_info->qgroup_ioctl_lock);
2356         fs_info->qgroup_tree = RB_ROOT;
2357         INIT_LIST_HEAD(&fs_info->dirty_qgroups);
2358         fs_info->qgroup_seq = 1;
2359         fs_info->quota_enabled = 0;
2360         fs_info->pending_quota_state = 0;
2361         fs_info->qgroup_ulist = NULL;
2362         mutex_init(&fs_info->qgroup_rescan_lock);
2363
2364         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2365         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2366
2367         init_waitqueue_head(&fs_info->transaction_throttle);
2368         init_waitqueue_head(&fs_info->transaction_wait);
2369         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2370         init_waitqueue_head(&fs_info->async_submit_wait);
2371
2372         ret = btrfs_alloc_stripe_hash_table(fs_info);
2373         if (ret) {
2374                 err = ret;
2375                 goto fail_alloc;
2376         }
2377
2378         __setup_root(4096, 4096, 4096, 4096, tree_root,
2379                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2380
2381         invalidate_bdev(fs_devices->latest_bdev);
2382
2383         /*
2384          * Read super block and check the signature bytes only
2385          */
2386         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2387         if (!bh) {
2388                 err = -EINVAL;
2389                 goto fail_alloc;
2390         }
2391
2392         /*
2393          * We want to check superblock checksum, the type is stored inside.
2394          * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
2395          */
2396         if (btrfs_check_super_csum(bh->b_data)) {
2397                 printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
2398                 err = -EINVAL;
2399                 goto fail_alloc;
2400         }
2401
2402         /*
2403          * super_copy is zeroed at allocation time and we never touch the
2404          * following bytes up to INFO_SIZE; the checksum is calculated over
2405          * the whole block of INFO_SIZE
2406          */
2407         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2408         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2409                sizeof(*fs_info->super_for_commit));
2410         brelse(bh);
2411
2412         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2413
2414         ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2415         if (ret) {
2416                 printk(KERN_ERR "BTRFS: superblock contains fatal errors\n");
2417                 err = -EINVAL;
2418                 goto fail_alloc;
2419         }
2420
2421         disk_super = fs_info->super_copy;
2422         if (!btrfs_super_root(disk_super))
2423                 goto fail_alloc;
2424
2425         /* check FS state, whether FS is broken. */
2426         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2427                 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2428
2429         /*
2430          * run through our array of backup supers and set up
2431          * our ring pointer to the oldest one
2432          */
2433         generation = btrfs_super_generation(disk_super);
2434         find_oldest_super_backup(fs_info, generation);
2435
2436         /*
2437          * In the long term, we'll store the compression type in the super
2438          * block, and it'll be used for per file compression control.
2439          */
2440         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2441
2442         ret = btrfs_parse_options(tree_root, options);
2443         if (ret) {
2444                 err = ret;
2445                 goto fail_alloc;
2446         }
2447
2448         features = btrfs_super_incompat_flags(disk_super) &
2449                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2450         if (features) {
2451                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2452                        "unsupported optional features (%Lx).\n",
2453                        features);
2454                 err = -EINVAL;
2455                 goto fail_alloc;
2456         }
2457
2458         if (btrfs_super_leafsize(disk_super) !=
2459             btrfs_super_nodesize(disk_super)) {
2460                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2461                        "blocksizes don't match.  node %d leaf %d\n",
2462                        btrfs_super_nodesize(disk_super),
2463                        btrfs_super_leafsize(disk_super));
2464                 err = -EINVAL;
2465                 goto fail_alloc;
2466         }
2467         if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2468                 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2469                        "blocksize (%d) was too large\n",
2470                        btrfs_super_leafsize(disk_super));
2471                 err = -EINVAL;
2472                 goto fail_alloc;
2473         }
2474
2475         features = btrfs_super_incompat_flags(disk_super);
2476         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2477         if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2478                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2479
2480         if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
2481                 printk(KERN_INFO "BTRFS: has skinny extents\n");
2482
2483         /*
2484          * flag our filesystem as having big metadata blocks if
2485          * they are bigger than the page size
2486          */
2487         if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2488                 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2489                         printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n");
2490                 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2491         }
2492
2493         nodesize = btrfs_super_nodesize(disk_super);
2494         leafsize = btrfs_super_leafsize(disk_super);
2495         sectorsize = btrfs_super_sectorsize(disk_super);
2496         stripesize = btrfs_super_stripesize(disk_super);
2497         fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2498         fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2499
2500         /*
2501          * mixed block groups end up with duplicate but slightly offset
2502          * extent buffers for the same range.  This leads to corruption
2503          */
2504         if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2505             (sectorsize != leafsize)) {
2506                 printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
2507                                 "are not allowed for mixed block groups on %s\n",
2508                                 sb->s_id);
2509                 goto fail_alloc;
2510         }
2511
2512         /*
2513          * We needn't take the lock because there is no other task that
2514          * will update the flag.
2515          */
2516         btrfs_set_super_incompat_flags(disk_super, features);
2517
2518         features = btrfs_super_compat_ro_flags(disk_super) &
2519                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2520         if (!(sb->s_flags & MS_RDONLY) && features) {
2521                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2522                        "unsupported option features (%Lx).\n",
2523                        features);
2524                 err = -EINVAL;
2525                 goto fail_alloc;
2526         }
2527
2528         max_active = fs_info->thread_pool_size;
2529
2530         fs_info->workers =
2531                 btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
2532                                       max_active, 16);
2533
2534         fs_info->delalloc_workers =
2535                 btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
2536
2537         fs_info->flush_workers =
2538                 btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
2539
2540         fs_info->caching_workers =
2541                 btrfs_alloc_workqueue("cache", flags, max_active, 0);
2542
2543         /*
2544          * a higher idle thresh on the submit workers makes it much more
2545          * likely that bios will be sent down in a sane order to the
2546          * devices
2547          */
2548         fs_info->submit_workers =
2549                 btrfs_alloc_workqueue("submit", flags,
2550                                       min_t(u64, fs_devices->num_devices,
2551                                             max_active), 64);
2552
2553         fs_info->fixup_workers =
2554                 btrfs_alloc_workqueue("fixup", flags, 1, 0);
2555
2556         /*
2557          * endios are largely parallel and should have a very
2558          * low idle thresh
2559          */
2560         fs_info->endio_workers =
2561                 btrfs_alloc_workqueue("endio", flags, max_active, 4);
2562         fs_info->endio_meta_workers =
2563                 btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
2564         fs_info->endio_meta_write_workers =
2565                 btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
2566         fs_info->endio_raid56_workers =
2567                 btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
2568         fs_info->rmw_workers =
2569                 btrfs_alloc_workqueue("rmw", flags, max_active, 2);
2570         fs_info->endio_write_workers =
2571                 btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
2572         fs_info->endio_freespace_worker =
2573                 btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
2574         fs_info->delayed_workers =
2575                 btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
2576         fs_info->readahead_workers =
2577                 btrfs_alloc_workqueue("readahead", flags, max_active, 2);
2578         fs_info->qgroup_rescan_workers =
2579                 btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
2580
2581         if (!(fs_info->workers && fs_info->delalloc_workers &&
2582               fs_info->submit_workers && fs_info->flush_workers &&
2583               fs_info->endio_workers && fs_info->endio_meta_workers &&
2584               fs_info->endio_meta_write_workers &&
2585               fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
2586               fs_info->endio_freespace_worker && fs_info->rmw_workers &&
2587               fs_info->caching_workers && fs_info->readahead_workers &&
2588               fs_info->fixup_workers && fs_info->delayed_workers &&
2589               fs_info->qgroup_rescan_workers)) {
2590                 err = -ENOMEM;
2591                 goto fail_sb_buffer;
2592         }
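/*
 * btrfs_alloc_workqueue() returns NULL on failure, so the single
 * combined check above is sufficient for all of the allocations; on
 * failure the fail_sb_buffer path tears down whichever queues were
 * actually created.
 */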
2593
2594         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2595         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2596                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2597
2598         tree_root->nodesize = nodesize;
2599         tree_root->leafsize = leafsize;
2600         tree_root->sectorsize = sectorsize;
2601         tree_root->stripesize = stripesize;
2602
2603         sb->s_blocksize = sectorsize;
2604         sb->s_blocksize_bits = blksize_bits(sectorsize);
2605
2606         if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
2607                 printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
2608                 goto fail_sb_buffer;
2609         }
2610
2611         if (sectorsize != PAGE_SIZE) {
2612                 printk(KERN_WARNING "BTRFS: Incompatible sector size (%lu) "
2613                        "found on %s\n", (unsigned long)sectorsize, sb->s_id);
2614                 goto fail_sb_buffer;
2615         }
2616
2617         mutex_lock(&fs_info->chunk_mutex);
2618         ret = btrfs_read_sys_array(tree_root);
2619         mutex_unlock(&fs_info->chunk_mutex);
2620         if (ret) {
2621                 printk(KERN_WARNING "BTRFS: failed to read the system "
2622                        "array on %s\n", sb->s_id);
2623                 goto fail_sb_buffer;
2624         }
2625
2626         blocksize = btrfs_level_size(tree_root,
2627                                      btrfs_super_chunk_root_level(disk_super));
2628         generation = btrfs_super_chunk_root_generation(disk_super);
2629
2630         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2631                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2632
2633         chunk_root->node = read_tree_block(chunk_root,
2634                                            btrfs_super_chunk_root(disk_super),
2635                                            blocksize, generation);
2636         if (!chunk_root->node ||
2637             !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2638                 printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
2639                        sb->s_id);
2640                 goto fail_tree_roots;
2641         }
2642         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2643         chunk_root->commit_root = btrfs_root_node(chunk_root);
2644
2645         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2646            btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
2647
2648         ret = btrfs_read_chunk_tree(chunk_root);
2649         if (ret) {
2650                 printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
2651                        sb->s_id);
2652                 goto fail_tree_roots;
2653         }
2654
2655         /*
2656          * keep the device that is marked to be the target device for the
2657          * dev_replace procedure
2658          */
2659         btrfs_close_extra_devices(fs_info, fs_devices, 0);
2660
2661         if (!fs_devices->latest_bdev) {
2662                 printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
2663                        sb->s_id);
2664                 goto fail_tree_roots;
2665         }
2666
2667 retry_root_backup:
2668         blocksize = btrfs_level_size(tree_root,
2669                                      btrfs_super_root_level(disk_super));
2670         generation = btrfs_super_generation(disk_super);
2671
2672         tree_root->node = read_tree_block(tree_root,
2673                                           btrfs_super_root(disk_super),
2674                                           blocksize, generation);
2675         if (!tree_root->node ||
2676             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2677                 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2678                        sb->s_id);
2679
2680                 goto recovery_tree_root;
2681         }
2682
2683         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2684         tree_root->commit_root = btrfs_root_node(tree_root);
2685         btrfs_set_root_refs(&tree_root->root_item, 1);
2686
2687         location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
2688         location.type = BTRFS_ROOT_ITEM_KEY;
2689         location.offset = 0;
2690
2691         extent_root = btrfs_read_tree_root(tree_root, &location);
2692         if (IS_ERR(extent_root)) {
2693                 ret = PTR_ERR(extent_root);
2694                 goto recovery_tree_root;
2695         }
2696         extent_root->track_dirty = 1;
2697         fs_info->extent_root = extent_root;
2698
2699         location.objectid = BTRFS_DEV_TREE_OBJECTID;
2700         dev_root = btrfs_read_tree_root(tree_root, &location);
2701         if (IS_ERR(dev_root)) {
2702                 ret = PTR_ERR(dev_root);
2703                 goto recovery_tree_root;
2704         }
2705         dev_root->track_dirty = 1;
2706         fs_info->dev_root = dev_root;
2707         btrfs_init_devices_late(fs_info);
2708
2709         location.objectid = BTRFS_CSUM_TREE_OBJECTID;
2710         csum_root = btrfs_read_tree_root(tree_root, &location);
2711         if (IS_ERR(csum_root)) {
2712                 ret = PTR_ERR(csum_root);
2713                 goto recovery_tree_root;
2714         }
2715         csum_root->track_dirty = 1;
2716         fs_info->csum_root = csum_root;
2717
2718         location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2719         quota_root = btrfs_read_tree_root(tree_root, &location);
2720         if (!IS_ERR(quota_root)) {
2721                 quota_root->track_dirty = 1;
2722                 fs_info->quota_enabled = 1;
2723                 fs_info->pending_quota_state = 1;
2724                 fs_info->quota_root = quota_root;
2725         }
2726
2727         location.objectid = BTRFS_UUID_TREE_OBJECTID;
2728         uuid_root = btrfs_read_tree_root(tree_root, &location);
2729         if (IS_ERR(uuid_root)) {
2730                 ret = PTR_ERR(uuid_root);
2731                 if (ret != -ENOENT)
2732                         goto recovery_tree_root;
2733                 create_uuid_tree = true;
2734                 check_uuid_tree = false;
2735         } else {
2736                 uuid_root->track_dirty = 1;
2737                 fs_info->uuid_root = uuid_root;
2738                 create_uuid_tree = false;
2739                 check_uuid_tree =
2740                     generation != btrfs_super_uuid_tree_generation(disk_super);
2741         }
2742
2743         fs_info->generation = generation;
2744         fs_info->last_trans_committed = generation;
2745
2746         ret = btrfs_recover_balance(fs_info);
2747         if (ret) {
2748                 printk(KERN_WARNING "BTRFS: failed to recover balance\n");
2749                 goto fail_block_groups;
2750         }
2751
2752         ret = btrfs_init_dev_stats(fs_info);
2753         if (ret) {
2754                 printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n",
2755                        ret);
2756                 goto fail_block_groups;
2757         }
2758
2759         ret = btrfs_init_dev_replace(fs_info);
2760         if (ret) {
2761                 pr_err("BTRFS: failed to init dev_replace: %d\n", ret);
2762                 goto fail_block_groups;
2763         }
2764
2765         btrfs_close_extra_devices(fs_info, fs_devices, 1);
2766
2767         ret = btrfs_sysfs_add_one(fs_info);
2768         if (ret) {
2769                 pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
2770                 goto fail_block_groups;
2771         }
2772
2773         ret = btrfs_init_space_info(fs_info);
2774         if (ret) {
2775                 printk(KERN_ERR "BTRFS: Failed to initialize space info: %d\n", ret);
2776                 goto fail_sysfs;
2777         }
2778
2779         ret = btrfs_read_block_groups(extent_root);
2780         if (ret) {
2781                 printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
2782                 goto fail_sysfs;
2783         }
2784         fs_info->num_tolerated_disk_barrier_failures =
2785                 btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
2786         if (fs_info->fs_devices->missing_devices >
2787              fs_info->num_tolerated_disk_barrier_failures &&
2788             !(sb->s_flags & MS_RDONLY)) {
2789                 printk(KERN_WARNING "BTRFS: "
2790                         "too many missing devices, writeable mount is not allowed\n");
2791                 goto fail_sysfs;
2792         }
2793
2794         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2795                                                "btrfs-cleaner");
2796         if (IS_ERR(fs_info->cleaner_kthread))
2797                 goto fail_sysfs;
2798
2799         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2800                                                    tree_root,
2801                                                    "btrfs-transaction");
2802         if (IS_ERR(fs_info->transaction_kthread))
2803                 goto fail_cleaner;
2804
2805         if (!btrfs_test_opt(tree_root, SSD) &&
2806             !btrfs_test_opt(tree_root, NOSSD) &&
2807             !fs_info->fs_devices->rotating) {
2808                 printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD "
2809                        "mode\n");
2810                 btrfs_set_opt(fs_info->mount_opt, SSD);
2811         }
2812
2813         /* Set the real inode map cache flag */
2814         if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
2815                 btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
2816
2817 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2818         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2819                 ret = btrfsic_mount(tree_root, fs_devices,
2820                                     btrfs_test_opt(tree_root,
2821                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2822                                     1 : 0,
2823                                     fs_info->check_integrity_print_mask);
2824                 if (ret)
2825                         printk(KERN_WARNING "BTRFS: failed to initialize"
2826                                " integrity check module %s\n", sb->s_id);
2827         }
2828 #endif
2829         ret = btrfs_read_qgroup_config(fs_info);
2830         if (ret)
2831                 goto fail_trans_kthread;
2832
2833         /* do not make disk changes in a broken FS; log replay writes to the device */
2834         if (btrfs_super_log_root(disk_super) != 0) {
2835                 u64 bytenr = btrfs_super_log_root(disk_super);
2836
2837                 if (fs_devices->rw_devices == 0) {
2838                         printk(KERN_WARNING "BTRFS: log replay required "
2839                                "on RO media\n");
2840                         err = -EIO;
2841                         goto fail_qgroup;
2842                 }
2843                 blocksize =
2844                      btrfs_level_size(tree_root,
2845                                       btrfs_super_log_root_level(disk_super));
2846
2847                 log_tree_root = btrfs_alloc_root(fs_info);
2848                 if (!log_tree_root) {
2849                         err = -ENOMEM;
2850                         goto fail_qgroup;
2851                 }
2852
2853                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2854                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2855
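                /*
                 * The log tree was written by the transaction that follows
                 * the last committed one, so its blocks are expected to
                 * carry generation + 1.
                 */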
2856                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2857                                                       blocksize,
2858                                                       generation + 1);
2859                 if (!log_tree_root->node ||
2860                     !extent_buffer_uptodate(log_tree_root->node)) {
2861                         printk(KERN_ERR "BTRFS: failed to read log tree\n");
2862                         free_extent_buffer(log_tree_root->node);
2863                         kfree(log_tree_root);
2864                         goto fail_qgroup;
2865                 }
2866                 /* returns with log_tree_root freed on success */
2867                 ret = btrfs_recover_log_trees(log_tree_root);
2868                 if (ret) {
2869                         btrfs_error(tree_root->fs_info, ret,
2870                                     "Failed to recover log tree");
2871                         free_extent_buffer(log_tree_root->node);
2872                         kfree(log_tree_root);
2873                         goto fail_qgroup;
2874                 }
2875
2876                 if (sb->s_flags & MS_RDONLY) {
2877                         ret = btrfs_commit_super(tree_root);
2878                         if (ret)
2879                                 goto fail_qgroup;
2880                 }
2881         }
2882
2883         ret = btrfs_find_orphan_roots(tree_root);
2884         if (ret)
2885                 goto fail_qgroup;
2886
2887         if (!(sb->s_flags & MS_RDONLY)) {
2888                 ret = btrfs_cleanup_fs_roots(fs_info);
2889                 if (ret)
2890                         goto fail_qgroup;
2891
2892                 ret = btrfs_recover_relocation(tree_root);
2893                 if (ret < 0) {
2894                         printk(KERN_WARNING
2895                                "BTRFS: failed to recover relocation\n");
2896                         err = -EINVAL;
2897                         goto fail_qgroup;
2898                 }
2899         }
2900
2901         location.objectid = BTRFS_FS_TREE_OBJECTID;
2902         location.type = BTRFS_ROOT_ITEM_KEY;
2903         location.offset = 0;
2904
2905         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2906         if (IS_ERR(fs_info->fs_root)) {
2907                 err = PTR_ERR(fs_info->fs_root);
2908                 goto fail_qgroup;
2909         }
2910
2911         if (sb->s_flags & MS_RDONLY)
2912                 return 0;
2913
2914         down_read(&fs_info->cleanup_work_sem);
2915         if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
2916             (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
2917                 up_read(&fs_info->cleanup_work_sem);
2918                 close_ctree(tree_root);
2919                 return ret;
2920         }
2921         up_read(&fs_info->cleanup_work_sem);
2922
2923         ret = btrfs_resume_balance_async(fs_info);
2924         if (ret) {
2925                 printk(KERN_WARNING "BTRFS: failed to resume balance\n");
2926                 close_ctree(tree_root);
2927                 return ret;
2928         }
2929
2930         ret = btrfs_resume_dev_replace_async(fs_info);
2931         if (ret) {
2932                 pr_warn("BTRFS: failed to resume dev_replace\n");
2933                 close_ctree(tree_root);
2934                 return ret;
2935         }
2936
2937         btrfs_qgroup_rescan_resume(fs_info);
2938
2939         if (create_uuid_tree) {
2940                 pr_info("BTRFS: creating UUID tree\n");
2941                 ret = btrfs_create_uuid_tree(fs_info);
2942                 if (ret) {
2943                         pr_warn("BTRFS: failed to create the UUID tree %d\n",
2944                                 ret);
2945                         close_ctree(tree_root);
2946                         return ret;
2947                 }
2948         } else if (check_uuid_tree ||
2949                    btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
2950                 pr_info("BTRFS: checking UUID tree\n");
2951                 ret = btrfs_check_uuid_tree(fs_info);
2952                 if (ret) {
2953                         pr_warn("BTRFS: failed to check the UUID tree %d\n",
2954                                 ret);
2955                         close_ctree(tree_root);
2956                         return ret;
2957                 }
2958         } else {
2959                 fs_info->update_uuid_tree_gen = 1;
2960         }
2961
2962         return 0;
2963
2964 fail_qgroup:
2965         btrfs_free_qgroup_config(fs_info);
2966 fail_trans_kthread:
2967         kthread_stop(fs_info->transaction_kthread);
2968         btrfs_cleanup_transaction(fs_info->tree_root);
2969         del_fs_roots(fs_info);
2970 fail_cleaner:
2971         kthread_stop(fs_info->cleaner_kthread);
2972
2973         /*
2974          * make sure we're done with the btree inode before we stop our
2975          * kthreads
2976          */
2977         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2978
2979 fail_sysfs:
2980         btrfs_sysfs_remove_one(fs_info);
2981
2982 fail_block_groups:
2983         btrfs_put_block_group_cache(fs_info);
2984         btrfs_free_block_groups(fs_info);
2985
2986 fail_tree_roots:
2987         free_root_pointers(fs_info, 1);
2988         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2989
2990 fail_sb_buffer:
2991         btrfs_stop_all_workers(fs_info);
2992 fail_alloc:
2993 fail_iput:
2994         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2995
2996         iput(fs_info->btree_inode);
2997 fail_bio_counter:
2998         percpu_counter_destroy(&fs_info->bio_counter);
2999 fail_delalloc_bytes:
3000         percpu_counter_destroy(&fs_info->delalloc_bytes);
3001 fail_dirty_metadata_bytes:
3002         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3003 fail_bdi:
3004         bdi_destroy(&fs_info->bdi);
3005 fail_srcu:
3006         cleanup_srcu_struct(&fs_info->subvol_srcu);
3007 fail:
3008         btrfs_free_stripe_hash_table(fs_info);
3009         btrfs_close_devices(fs_info->fs_devices);
3010         return err;
3011
3012 recovery_tree_root:
3013         if (!btrfs_test_opt(tree_root, RECOVERY))
3014                 goto fail_tree_roots;
3015
3016         free_root_pointers(fs_info, 0);
3017
3018         /* don't use the log in recovery mode, it won't be valid */
3019         btrfs_set_super_log_root(disk_super, 0);
3020
3021         /* we can't trust the free space cache either */
3022         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
3023
3024         ret = next_root_backup(fs_info, fs_info->super_copy,
3025                                &num_backups_tried, &backup_index);
3026         if (ret == -1)
3027                 goto fail_block_groups;
3028         goto retry_root_backup;
3029 }
3030
3031 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
3032 {
3033         if (uptodate) {
3034                 set_buffer_uptodate(bh);
3035         } else {
3036                 struct btrfs_device *device = (struct btrfs_device *)
3037                         bh->b_private;
3038
3039                 printk_ratelimited_in_rcu(KERN_WARNING "BTRFS: lost page write due to "
3040                                           "I/O error on %s\n",
3041                                           rcu_str_deref(device->name));
3042                 /* note, we don't set_buffer_write_io_error because we have
3043                  * our own ways of dealing with the IO errors
3044                  */
3045                 clear_buffer_uptodate(bh);
3046                 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
3047         }
3048         unlock_buffer(bh);
3049         put_bh(bh);
3050 }
3051
3052 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
3053 {
3054         struct buffer_head *bh;
3055         struct buffer_head *latest = NULL;
3056         struct btrfs_super_block *super;
3057         int i;
3058         u64 transid = 0;
3059         u64 bytenr;
3060
3061         /* we would like to check all the supers, but that would make
3062          * a btrfs mount succeed after a mkfs from a different FS.
3063          * So for now only the first super is checked; scanning the later
3064          * supers (up to BTRFS_SUPER_MIRROR_MAX) would need a special mount option
3065          */
3066         for (i = 0; i < 1; i++) {
3067                 bytenr = btrfs_sb_offset(i);
3068                 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3069                                         i_size_read(bdev->bd_inode))
3070                         break;
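                /*
                 * __bread() takes a block index in units of the buffer size
                 * (BTRFS_SUPER_INFO_SIZE, 4KiB here), so convert the byte
                 * offset of this super mirror accordingly.
                 */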
3071                 bh = __bread(bdev, bytenr / 4096,
3072                                         BTRFS_SUPER_INFO_SIZE);
3073                 if (!bh)
3074                         continue;
3075
3076                 super = (struct btrfs_super_block *)bh->b_data;
3077                 if (btrfs_super_bytenr(super) != bytenr ||
3078                     btrfs_super_magic(super) != BTRFS_MAGIC) {
3079                         brelse(bh);
3080                         continue;
3081                 }
3082
3083                 if (!latest || btrfs_super_generation(super) > transid) {
3084                         brelse(latest);
3085                         latest = bh;
3086                         transid = btrfs_super_generation(super);
3087                 } else {
3088                         brelse(bh);
3089                 }
3090         }
3091         return latest;
3092 }
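
/*
 * For reference, btrfs_sb_offset() places the super mirrors at fixed byte
 * offsets: mirror 0 at 64KiB, mirror 1 at 64MiB and mirror 2 at 256GiB
 * (BTRFS_SUPER_MIRROR_MAX is 3), so a device only carries the copies that
 * fit below its size.  The same math as a sketch (hypothetical helper,
 * not part of this file):
 *
 *	u64 sb_mirror_offset(int mirror)
 *	{
 *		return mirror ? 16384ULL << (12 * mirror) : 65536ULL;
 *	}
 */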
3093
3094 /*
3095  * this should be called twice, once with wait == 0 and
3096  * once with wait == 1.  When wait == 0 is done, all the buffer heads
3097  * we write are pinned.
3098  *
3099  * They are released when wait == 1 is done.
3100  * max_mirrors must be the same for both runs, and it indicates how
3101  * many supers on this one device should be written.
3102  *
3103  * max_mirrors == 0 means to write them all.
3104  */
3105 static int write_dev_supers(struct btrfs_device *device,
3106                             struct btrfs_super_block *sb,
3107                             int do_barriers, int wait, int max_mirrors)
3108 {
3109         struct buffer_head *bh;
3110         int i;
3111         int ret;
3112         int errors = 0;
3113         u32 crc;
3114         u64 bytenr;
3115
3116         if (max_mirrors == 0)
3117                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3118
3119         for (i = 0; i < max_mirrors; i++) {
3120                 bytenr = btrfs_sb_offset(i);
3121                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
3122                         break;
3123
3124                 if (wait) {
3125                         bh = __find_get_block(device->bdev, bytenr / 4096,
3126                                               BTRFS_SUPER_INFO_SIZE);
3127                         if (!bh) {
3128                                 errors++;
3129                                 continue;
3130                         }
3131                         wait_on_buffer(bh);
3132                         if (!buffer_uptodate(bh))
3133                                 errors++;
3134
3135                         /* drop our reference */
3136                         brelse(bh);
3137
3138                         /* drop the reference from the wait == 0 run */
3139                         brelse(bh);
3140                         continue;
3141                 } else {
3142                         btrfs_set_super_bytenr(sb, bytenr);
3143
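                        /*
                         * Checksum everything after the csum field itself:
                         * crc32c over bytes [BTRFS_CSUM_SIZE,
                         * BTRFS_SUPER_INFO_SIZE), with the finalized value
                         * stored back into sb->csum.
                         */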
3144                         crc = ~(u32)0;
3145                         crc = btrfs_csum_data((char *)sb +
3146                                               BTRFS_CSUM_SIZE, crc,
3147                                               BTRFS_SUPER_INFO_SIZE -
3148                                               BTRFS_CSUM_SIZE);
3149                         btrfs_csum_final(crc, sb->csum);
3150
3151                         /*
3152                          * one reference for us from __getblk(); it is
3153                          * dropped by the wait == 1 pass above
3154                          */
3155                         bh = __getblk(device->bdev, bytenr / 4096,
3156                                       BTRFS_SUPER_INFO_SIZE);
3157                         if (!bh) {
3158                                 printk(KERN_ERR "BTRFS: couldn't get super "
3159                                        "buffer head for bytenr %Lu\n", bytenr);
3160                                 errors++;
3161                                 continue;
3162                         }
3163
3164                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
3165
3166                         /* one reference for submit_bh */
3167                         get_bh(bh);
3168
3169                         set_buffer_uptodate(bh);
3170                         lock_buffer(bh);
3171                         bh->b_end_io = btrfs_end_buffer_write_sync;
3172                         bh->b_private = device;
3173                 }
3174
3175                 /*
3176                  * we fua the first super.  The others we allow
3177                  * to go down lazily.
3178                  */
3179                 if (i == 0)
3180                         ret = btrfsic_submit_bh(WRITE_FUA, bh);
3181                 else
3182                         ret = btrfsic_submit_bh(WRITE_SYNC, bh);
3183                 if (ret)
3184                         errors++;
3185         }
3186         return errors < i ? 0 : -1;
3187 }
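
/*
 * A minimal sketch of the two-pass protocol described above, assuming a
 * hypothetical caller (write_all_supers() below is the real one): queue
 * the writes first, then wait and drop the pinned buffer heads.
 *
 *	ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	if (ret)
 *		...
 *	ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 */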
3188
3189 /*
3190  * endio for the write_dev_flush, this will wake anyone waiting
3191  * for the barrier when it is done
3192  */
3193 static void btrfs_end_empty_barrier(struct bio *bio, int err)
3194 {
3195         if (err) {
3196                 if (err == -EOPNOTSUPP)
3197                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
3198                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
3199         }
3200         if (bio->bi_private)
3201                 complete(bio->bi_private);
3202         bio_put(bio);
3203 }
3204
3205 /*
3206  * trigger flushes for one of the devices.  If you pass wait == 0, the
3207  * flushes are sent down.  With wait == 1, it waits for the previous flush.
3208  *
3209  * any device where the flush fails with eopnotsupp is flagged as not-barrier
3210  * capable
3211  */
3212 static int write_dev_flush(struct btrfs_device *device, int wait)
3213 {
3214         struct bio *bio;
3215         int ret = 0;
3216
3217         if (device->nobarriers)
3218                 return 0;
3219
3220         if (wait) {
3221                 bio = device->flush_bio;
3222                 if (!bio)
3223                         return 0;
3224
3225                 wait_for_completion(&device->flush_wait);
3226
3227                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
3228                         printk_in_rcu("BTRFS: disabling barriers on dev %s\n",
3229                                       rcu_str_deref(device->name));
3230                         device->nobarriers = 1;
3231                 } else if (!bio_flagged(bio, BIO_UPTODATE)) {
3232                         ret = -EIO;
3233                         btrfs_dev_stat_inc_and_print(device,
3234                                 BTRFS_DEV_STAT_FLUSH_ERRS);
3235                 }
3236
3237                 /* drop the reference from the wait == 0 run */
3238                 bio_put(bio);
3239                 device->flush_bio = NULL;
3240
3241                 return ret;
3242         }
3243
3244         /*
3245          * one reference for us from bio_get(); it is dropped
3246          * by the wait == 1 pass above
3247          */
3248         device->flush_bio = NULL;
3249         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3250         if (!bio)
3251                 return -ENOMEM;
3252
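        /*
         * An empty bio (no data pages) submitted with WRITE_FLUSH acts as a
         * pure cache-flush request against the device's volatile write cache.
         */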
3253         bio->bi_end_io = btrfs_end_empty_barrier;
3254         bio->bi_bdev = device->bdev;
3255         init_completion(&device->flush_wait);
3256         bio->bi_private = &device->flush_wait;
3257         device->flush_bio = bio;
3258
3259         bio_get(bio);
3260         btrfsic_submit_bio(WRITE_FLUSH, bio);
3261
3262         return 0;
3263 }
3264
3265 /*
3266  * send an empty flush down to each device in parallel,
3267  * then wait for them
3268  */
3269 static int barrier_all_devices(struct btrfs_fs_info *info)
3270 {
3271         struct list_head *head;
3272         struct btrfs_device *dev;
3273         int errors_send = 0;
3274         int errors_wait = 0;
3275         int ret;
3276
3277         /* send down all the barriers */
3278         head = &info->fs_devices->devices;
3279         list_for_each_entry_rcu(dev, head, dev_list) {
3280                 if (dev->missing)
3281                         continue;
3282                 if (!dev->bdev) {
3283                         errors_send++;
3284                         continue;
3285                 }
3286                 if (!dev->in_fs_metadata || !dev->writeable)
3287                         continue;
3288
3289                 ret = write_dev_flush(dev, 0);
3290                 if (ret)
3291                         errors_send++;
3292         }
3293
3294         /* wait for all the barriers */
3295         list_for_each_entry_rcu(dev, head, dev_list) {
3296                 if (dev->missing)
3297                         continue;
3298                 if (!dev->bdev) {
3299                         errors_wait++;
3300                         continue;
3301                 }
3302                 if (!dev->in_fs_metadata || !dev->writeable)
3303                         continue;
3304
3305                 ret = write_dev_flush(dev, 1);
3306                 if (ret)
3307                         errors_wait++;
3308         }
3309         if (errors_send > info->num_tolerated_disk_barrier_failures ||
3310             errors_wait > info->num_tolerated_disk_barrier_failures)
3311                 return -EIO;
3312         return 0;
3313 }
3314
3315 int btrfs_calc_num_tolerated_disk_barrier_failures(
3316         struct btrfs_fs_info *fs_info)
3317 {
3318         struct btrfs_ioctl_space_info space;
3319         struct btrfs_space_info *sinfo;
3320         u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
3321                        BTRFS_BLOCK_GROUP_SYSTEM,
3322                        BTRFS_BLOCK_GROUP_METADATA,
3323                        BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA};
3324         int num_types = 4;
3325         int i;
3326         int c;
3327         int num_tolerated_disk_barrier_failures =
3328                 (int)fs_info->fs_devices->num_devices;
3329
3330         for (i = 0; i < num_types; i++) {
3331                 struct btrfs_space_info *tmp;
3332
3333                 sinfo = NULL;
3334                 rcu_read_lock();
3335                 list_for_each_entry_rcu(tmp, &fs_info->space_info, list) {
3336                         if (tmp->flags == types[i]) {
3337                                 sinfo = tmp;
3338                                 break;
3339                         }
3340                 }
3341                 rcu_read_unlock();
3342
3343                 if (!sinfo)
3344                         continue;
3345
3346                 down_read(&sinfo->groups_sem);
3347                 for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
3348                         if (!list_empty(&sinfo->block_groups[c])) {
3349                                 u64 flags;
3350
3351                                 btrfs_get_block_group_info(
3352                                         &sinfo->block_groups[c], &space);
3353                                 if (space.total_bytes == 0 ||
3354                                     space.used_bytes == 0)
3355                                         continue;
3356                                 flags = space.flags;
3357                                 /*
3358                                  * return
3359                                  * 0: if dup, single or RAID0 is configured for
3360                                  *    any of metadata, system or data, else
3361                                  * 1: if RAID5 is configured, or if RAID1 or
3362                                  *    RAID10 is configured and only two mirrors
3363                                  *    are used, else
3364                                  * 2: if RAID6 is configured, else
3365                                  * num_mirrors - 1: if RAID1 or RAID10 is
3366                                  *                  configured and more than
3367                                  *                  2 mirrors are used.
3368                                  */
3369                                 if (num_tolerated_disk_barrier_failures > 0 &&
3370                                     ((flags & (BTRFS_BLOCK_GROUP_DUP |
3371                                                BTRFS_BLOCK_GROUP_RAID0)) ||
3372                                      ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
3373                                       == 0)))
3374                                         num_tolerated_disk_barrier_failures = 0;
3375                                 else if (num_tolerated_disk_barrier_failures > 1) {
3376                                         if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3377                                             BTRFS_BLOCK_GROUP_RAID5 |
3378                                             BTRFS_BLOCK_GROUP_RAID10)) {
3379                                                 num_tolerated_disk_barrier_failures = 1;
3380                                         } else if (flags &
3381                                                    BTRFS_BLOCK_GROUP_RAID6) {
3382                                                 num_tolerated_disk_barrier_failures = 2;
3383                                         }
3384                                 }
3385                         }
3386                 }
3387                 up_read(&sinfo->groups_sem);
3388         }
3389
3390         return num_tolerated_disk_barrier_failures;
3391 }
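
/*
 * Worked examples of the rules above: RAID0 or single data caps the result
 * at 0 regardless of the metadata profile; RAID1 metadata with RAID1 data
 * yields 1; RAID6 everywhere yields 2.  The weakest profile always wins
 * because every block group type is folded into the same counter.
 */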
3392
3393 static int write_all_supers(struct btrfs_root *root, int max_mirrors)
3394 {
3395         struct list_head *head;
3396         struct btrfs_device *dev;
3397         struct btrfs_super_block *sb;
3398         struct btrfs_dev_item *dev_item;
3399         int ret;
3400         int do_barriers;
3401         int max_errors;
3402         int total_errors = 0;
3403         u64 flags;
3404
3405         do_barriers = !btrfs_test_opt(root, NOBARRIER);
3406         backup_super_roots(root->fs_info);
3407
3408         sb = root->fs_info->super_for_commit;
3409         dev_item = &sb->dev_item;
3410
3411         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3412         head = &root->fs_info->fs_devices->devices;
3413         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
3414
3415         if (do_barriers) {
3416                 ret = barrier_all_devices(root->fs_info);
3417                 if (ret) {
3418                         mutex_unlock(
3419                                 &root->fs_info->fs_devices->device_list_mutex);
3420                         btrfs_error(root->fs_info, ret,
3421                                     "errors while submitting device barriers.");
3422                         return ret;
3423                 }
3424         }
3425
3426         list_for_each_entry_rcu(dev, head, dev_list) {
3427                 if (!dev->bdev) {
3428                         total_errors++;
3429                         continue;
3430                 }
3431                 if (!dev->in_fs_metadata || !dev->writeable)
3432                         continue;
3433
3434                 btrfs_set_stack_device_generation(dev_item, 0);
3435                 btrfs_set_stack_device_type(dev_item, dev->type);
3436                 btrfs_set_stack_device_id(dev_item, dev->devid);
3437                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
3438                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
3439                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
3440                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
3441                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
3442                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
3443                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
3444
3445                 flags = btrfs_super_flags(sb);
3446                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
3447
3448                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3449                 if (ret)
3450                         total_errors++;
3451         }
3452         if (total_errors > max_errors) {
3453                 btrfs_err(root->fs_info, "%d errors while writing supers",
3454                        total_errors);
3455                 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3456
3457                 /* FUA is masked off if unsupported and can't be the reason */
3458                 btrfs_error(root->fs_info, -EIO,
3459                             "%d errors while writing supers", total_errors);
3460                 return -EIO;
3461         }
3462
3463         total_errors = 0;
3464         list_for_each_entry_rcu(dev, head, dev_list) {
3465                 if (!dev->bdev)
3466                         continue;
3467                 if (!dev->in_fs_metadata || !dev->writeable)
3468                         continue;
3469
3470                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3471                 if (ret)
3472                         total_errors++;
3473         }
3474         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3475         if (total_errors > max_errors) {
3476                 btrfs_error(root->fs_info, -EIO,
3477                             "%d errors while writing supers", total_errors);
3478                 return -EIO;
3479         }
3480         return 0;
3481 }
3482
3483 int write_ctree_super(struct btrfs_trans_handle *trans,
3484                       struct btrfs_root *root, int max_mirrors)
3485 {
3486         return write_all_supers(root, max_mirrors);
3487 }
3488
3489 /* Drop a fs root from the radix tree and free it. */
3490 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
3491                                   struct btrfs_root *root)
3492 {
3493         spin_lock(&fs_info->fs_roots_radix_lock);
3494         radix_tree_delete(&fs_info->fs_roots_radix,
3495                           (unsigned long)root->root_key.objectid);
3496         spin_unlock(&fs_info->fs_roots_radix_lock);
3497
3498         if (btrfs_root_refs(&root->root_item) == 0)
3499                 synchronize_srcu(&fs_info->subvol_srcu);
3500
3501         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3502                 btrfs_free_log(NULL, root);
3503
3504         __btrfs_remove_free_space_cache(root->free_ino_pinned);
3505         __btrfs_remove_free_space_cache(root->free_ino_ctl);
3506         free_fs_root(root);
3507 }
3508
3509 static void free_fs_root(struct btrfs_root *root)
3510 {
3511         iput(root->cache_inode);
3512         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
3513         btrfs_free_block_rsv(root, root->orphan_block_rsv);
3514         root->orphan_block_rsv = NULL;
3515         if (root->anon_dev)
3516                 free_anon_bdev(root->anon_dev);
3517         if (root->subv_writers)
3518                 btrfs_free_subvolume_writers(root->subv_writers);
3519         free_extent_buffer(root->node);
3520         free_extent_buffer(root->commit_root);
3521         kfree(root->free_ino_ctl);
3522         kfree(root->free_ino_pinned);
3523         kfree(root->name);
3524         btrfs_put_fs_root(root);
3525 }
3526
3527 void btrfs_free_fs_root(struct btrfs_root *root)
3528 {
3529         free_fs_root(root);
3530 }
3531
3532 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
3533 {
3534         u64 root_objectid = 0;
3535         struct btrfs_root *gang[8];
3536         int i;
3537         int ret;
3538
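        /*
         * Walk the fs_roots radix tree in batches of up to eight roots,
         * resuming each lookup just past the highest objectid seen so far.
         */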
3539         while (1) {
3540                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
3541                                              (void **)gang, root_objectid,
3542                                              ARRAY_SIZE(gang));
3543                 if (!ret)
3544                         break;
3545
3546                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
3547                 for (i = 0; i < ret; i++) {
3548                         int err;
3549
3550                         root_objectid = gang[i]->root_key.objectid;
3551                         err = btrfs_orphan_cleanup(gang[i]);
3552                         if (err)
3553                                 return err;
3554                 }
3555                 root_objectid++;
3556         }
3557         return 0;
3558 }
3559
3560 int btrfs_commit_super(struct btrfs_root *root)
3561 {
3562         struct btrfs_trans_handle *trans;
3563
3564         mutex_lock(&root->fs_info->cleaner_mutex);
3565         btrfs_run_delayed_iputs(root);
3566         mutex_unlock(&root->fs_info->cleaner_mutex);
3567         wake_up_process(root->fs_info->cleaner_kthread);
3568
3569         /* wait until ongoing cleanup work is done */
3570         down_write(&root->fs_info->cleanup_work_sem);
3571         up_write(&root->fs_info->cleanup_work_sem);
3572
3573         trans = btrfs_join_transaction(root);
3574         if (IS_ERR(trans))
3575                 return PTR_ERR(trans);
3576         return btrfs_commit_transaction(trans, root);
3577 }
3578
3579 int close_ctree(struct btrfs_root *root)
3580 {
3581         struct btrfs_fs_info *fs_info = root->fs_info;
3582         int ret;
3583
3584         fs_info->closing = 1;
3585         smp_mb();
3586
3587         /* wait for the uuid_scan task to finish */
3588         down(&fs_info->uuid_tree_rescan_sem);
3589         /* avoid complaints from lockdep et al., set sem back to initial state */
3590         up(&fs_info->uuid_tree_rescan_sem);
3591
3592         /* pause restriper - we want to resume on mount */
3593         btrfs_pause_balance(fs_info);
3594
3595         btrfs_dev_replace_suspend_for_unmount(fs_info);
3596
3597         btrfs_scrub_cancel(fs_info);
3598
3599         /* wait for any defraggers to finish */
3600         wait_event(fs_info->transaction_wait,
3601                    (atomic_read(&fs_info->defrag_running) == 0));
3602
3603         /* clear out the rbtree of defraggable inodes */
3604         btrfs_cleanup_defrag_inodes(fs_info);
3605
3606         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
3607                 ret = btrfs_commit_super(root);
3608                 if (ret)
3609                         btrfs_err(root->fs_info, "commit super ret %d", ret);
3610         }
3611
3612         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3613                 btrfs_error_commit_super(root);
3614
3615         kthread_stop(fs_info->transaction_kthread);
3616         kthread_stop(fs_info->cleaner_kthread);
3617
3618         fs_info->closing = 2;
3619         smp_mb();
3620
3621         btrfs_free_qgroup_config(root->fs_info);
3622
3623         if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3624                 btrfs_info(root->fs_info, "at unmount delalloc count %lld",
3625                        percpu_counter_sum(&fs_info->delalloc_bytes));
3626         }
3627
3628         btrfs_sysfs_remove_one(fs_info);
3629
3630         del_fs_roots(fs_info);
3631
3632         btrfs_put_block_group_cache(fs_info);
3633
3634         btrfs_free_block_groups(fs_info);
3635
3636         btrfs_stop_all_workers(fs_info);
3637
3638         free_root_pointers(fs_info, 1);
3639
3640         iput(fs_info->btree_inode);
3641
3642 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3643         if (btrfs_test_opt(root, CHECK_INTEGRITY))
3644                 btrfsic_unmount(root, fs_info->fs_devices);
3645 #endif
3646
3647         btrfs_close_devices(fs_info->fs_devices);
3648         btrfs_mapping_tree_free(&fs_info->mapping_tree);
3649
3650         percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3651         percpu_counter_destroy(&fs_info->delalloc_bytes);
3652         percpu_counter_destroy(&fs_info->bio_counter);
3653         bdi_destroy(&fs_info->bdi);
3654         cleanup_srcu_struct(&fs_info->subvol_srcu);
3655
3656         btrfs_free_stripe_hash_table(fs_info);
3657
3658         btrfs_free_block_rsv(root, root->orphan_block_rsv);
3659         root->orphan_block_rsv = NULL;
3660
3661         return 0;
3662 }
3663
3664 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
3665                           int atomic)
3666 {
3667         int ret;
3668         struct inode *btree_inode = buf->pages[0]->mapping->host;
3669
3670         ret = extent_buffer_uptodate(buf);
3671         if (!ret)
3672                 return ret;
3673
3674         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3675                                     parent_transid, atomic);
3676         if (ret == -EAGAIN)
3677                 return ret;
3678         return !ret;
3679 }
3680
3681 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3682 {
3683         return set_extent_buffer_uptodate(buf);
3684 }
3685
3686 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3687 {
3688         struct btrfs_root *root;
3689         u64 transid = btrfs_header_generation(buf);
3690         int was_dirty;
3691
3692 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3693         /*
3694          * This is a fast path so only do this check if we have sanity tests
3695          * enabled.  Normal people shouldn't be marking dummy buffers as dirty
3696          * outside of the sanity tests.
3697          */
3698         if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &buf->bflags)))
3699                 return;
3700 #endif
3701         root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3702         btrfs_assert_tree_locked(buf);
3703         if (transid != root->fs_info->generation)
3704                 WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, "
3705                        "found %llu running %llu\n",
3706                         buf->start, transid, root->fs_info->generation);
3707         was_dirty = set_extent_buffer_dirty(buf);
3708         if (!was_dirty)
3709                 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3710                                      buf->len,
3711                                      root->fs_info->dirty_metadata_batch);
3712 }
3713
3714 static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3715                                         int flush_delayed)
3716 {
3717         /*
3718          * looks as though older kernels can get into trouble with
3719          * this code; they end up stuck in balance_dirty_pages forever
3720          */
3721         int ret;
3722
3723         if (current->flags & PF_MEMALLOC)
3724                 return;
3725
3726         if (flush_delayed)
3727                 btrfs_balance_delayed_items(root);
3728
3729         ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3730                                      BTRFS_DIRTY_METADATA_THRESH);
3731         if (ret > 0) {
3732                 balance_dirty_pages_ratelimited(
3733                                    root->fs_info->btree_inode->i_mapping);
3734         }
3735         return;
3736 }
3737
3738 void btrfs_btree_balance_dirty(struct btrfs_root *root)
3739 {
3740         __btrfs_btree_balance_dirty(root, 1);
3741 }
3742
3743 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
3744 {
3745         __btrfs_btree_balance_dirty(root, 0);
3746 }
3747
3748 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3749 {
3750         struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3751         return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3752 }
3753
3754 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3755                               int read_only)
3756 {
3757         /*
3758          * Placeholder for checks
3759          */
3760         return 0;
3761 }
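
/*
 * A minimal sketch of the kind of validation that could back the
 * placeholder above; these checks are illustrative assumptions, not the
 * set btrfs actually enforces:
 *
 *	struct btrfs_super_block *sb = fs_info->super_copy;
 *
 *	if (btrfs_super_magic(sb) != BTRFS_MAGIC)
 *		return -EINVAL;
 *	if (!is_power_of_2(btrfs_super_sectorsize(sb)))
 *		return -EINVAL;
 */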
3762
3763 static void btrfs_error_commit_super(struct btrfs_root *root)
3764 {
3765         mutex_lock(&root->fs_info->cleaner_mutex);
3766         btrfs_run_delayed_iputs(root);
3767         mutex_unlock(&root->fs_info->cleaner_mutex);
3768
3769         down_write(&root->fs_info->cleanup_work_sem);
3770         up_write(&root->fs_info->cleanup_work_sem);
3771
3772         /* cleanup FS via transaction */
3773         btrfs_cleanup_transaction(root);
3774 }
3775
3776 static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3777                                              struct btrfs_root *root)
3778 {
3779         struct btrfs_inode *btrfs_inode;
3780         struct list_head splice;
3781
3782         INIT_LIST_HEAD(&splice);
3783
3784         mutex_lock(&root->fs_info->ordered_operations_mutex);
3785         spin_lock(&root->fs_info->ordered_root_lock);
3786
3787         list_splice_init(&t->ordered_operations, &splice);
3788         while (!list_empty(&splice)) {
3789                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3790                                          ordered_operations);
3791
3792                 list_del_init(&btrfs_inode->ordered_operations);
3793                 spin_unlock(&root->fs_info->ordered_root_lock);
3794
3795                 btrfs_invalidate_inodes(btrfs_inode->root);
3796
3797                 spin_lock(&root->fs_info->ordered_root_lock);
3798         }
3799
3800         spin_unlock(&root->fs_info->ordered_root_lock);
3801         mutex_unlock(&root->fs_info->ordered_operations_mutex);
3802 }
3803
3804 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3805 {
3806         struct btrfs_ordered_extent *ordered;
3807
3808         spin_lock(&root->ordered_extent_lock);
3809         /*
3810          * This will just short-circuit the ordered completion path, which
3811          * will make sure the ordered extent gets properly cleaned up.
3812          */
3813         list_for_each_entry(ordered, &root->ordered_extents,
3814                             root_extent_list)
3815                 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3816         spin_unlock(&root->ordered_extent_lock);
3817 }
3818
3819 static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
3820 {
3821         struct btrfs_root *root;
3822         struct list_head splice;
3823
3824         INIT_LIST_HEAD(&splice);
3825
3826         spin_lock(&fs_info->ordered_root_lock);
3827         list_splice_init(&fs_info->ordered_roots, &splice);
3828         while (!list_empty(&splice)) {
3829                 root = list_first_entry(&splice, struct btrfs_root,
3830                                         ordered_root);
3831                 list_move_tail(&root->ordered_root,
3832                                &fs_info->ordered_roots);
3833
3834                 spin_unlock(&fs_info->ordered_root_lock);
3835                 btrfs_destroy_ordered_extents(root);
3836
3837                 cond_resched();
3838                 spin_lock(&fs_info->ordered_root_lock);
3839         }
3840         spin_unlock(&fs_info->ordered_root_lock);
3841 }
3842
3843 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3844                                       struct btrfs_root *root)
3845 {
3846         struct rb_node *node;
3847         struct btrfs_delayed_ref_root *delayed_refs;
3848         struct btrfs_delayed_ref_node *ref;
3849         int ret = 0;
3850
3851         delayed_refs = &trans->delayed_refs;
3852
3853         spin_lock(&delayed_refs->lock);
3854         if (atomic_read(&delayed_refs->num_entries) == 0) {
3855                 spin_unlock(&delayed_refs->lock);
3856                 btrfs_info(root->fs_info, "delayed_refs has no entries");
3857                 return ret;
3858         }
3859
3860         while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
3861                 struct btrfs_delayed_ref_head *head;
3862                 bool pin_bytes = false;
3863
3864                 head = rb_entry(node, struct btrfs_delayed_ref_head,
3865                                 href_node);
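                /*
                 * If someone else holds the head's mutex they are still
                 * processing it; take a reference, wait for them by cycling
                 * the mutex, then restart the scan.
                 */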
3866                 if (!mutex_trylock(&head->mutex)) {
3867                         atomic_inc(&head->node.refs);
3868                         spin_unlock(&delayed_refs->lock);
3869
3870                         mutex_lock(&head->mutex);
3871                         mutex_unlock(&head->mutex);
3872                         btrfs_put_delayed_ref(&head->node);
3873                         spin_lock(&delayed_refs->lock);
3874                         continue;
3875                 }
3876                 spin_lock(&head->lock);
3877                 while ((node = rb_first(&head->ref_root)) != NULL) {
3878                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
3879                                        rb_node);
3880                         ref->in_tree = 0;
3881                         rb_erase(&ref->rb_node, &head->ref_root);
3882                         atomic_dec(&delayed_refs->num_entries);
3883                         btrfs_put_delayed_ref(ref);
3884                 }
3885                 if (head->must_insert_reserved)
3886                         pin_bytes = true;
3887                 btrfs_free_delayed_extent_op(head->extent_op);
3888                 delayed_refs->num_heads--;
3889                 if (head->processing == 0)
3890                         delayed_refs->num_heads_ready--;
3891                 atomic_dec(&delayed_refs->num_entries);
3892                 head->node.in_tree = 0;
3893                 rb_erase(&head->href_node, &delayed_refs->href_root);
3894                 spin_unlock(&head->lock);
3895                 spin_unlock(&delayed_refs->lock);
3896                 mutex_unlock(&head->mutex);
3897
3898                 if (pin_bytes)
3899                         btrfs_pin_extent(root, head->node.bytenr,
3900                                          head->node.num_bytes, 1);
3901                 btrfs_put_delayed_ref(&head->node);
3902                 cond_resched();
3903                 spin_lock(&delayed_refs->lock);
3904         }
3905
3906         spin_unlock(&delayed_refs->lock);
3907
3908         return ret;
3909 }
3910
3911 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3912 {
3913         struct btrfs_inode *btrfs_inode;
3914         struct list_head splice;
3915
3916         INIT_LIST_HEAD(&splice);
3917
3918         spin_lock(&root->delalloc_lock);
3919         list_splice_init(&root->delalloc_inodes, &splice);
3920
3921         while (!list_empty(&splice)) {
3922                 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
3923                                                delalloc_inodes);
3924
3925                 list_del_init(&btrfs_inode->delalloc_inodes);
3926                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3927                           &btrfs_inode->runtime_flags);
3928                 spin_unlock(&root->delalloc_lock);
3929
3930                 btrfs_invalidate_inodes(btrfs_inode->root);
3931
3932                 spin_lock(&root->delalloc_lock);
3933         }
3934
3935         spin_unlock(&root->delalloc_lock);
3936 }
3937
3938 static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
3939 {
3940         struct btrfs_root *root;
3941         struct list_head splice;
3942
3943         INIT_LIST_HEAD(&splice);
3944
3945         spin_lock(&fs_info->delalloc_root_lock);
3946         list_splice_init(&fs_info->delalloc_roots, &splice);
3947         while (!list_empty(&splice)) {
3948                 root = list_first_entry(&splice, struct btrfs_root,
3949                                          delalloc_root);
3950                 list_del_init(&root->delalloc_root);
3951                 root = btrfs_grab_fs_root(root);
3952                 BUG_ON(!root);
3953                 spin_unlock(&fs_info->delalloc_root_lock);
3954
3955                 btrfs_destroy_delalloc_inodes(root);
3956                 btrfs_put_fs_root(root);
3957
3958                 spin_lock(&fs_info->delalloc_root_lock);
3959         }
3960         spin_unlock(&fs_info->delalloc_root_lock);
3961 }
3962
3963 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3964                                         struct extent_io_tree *dirty_pages,
3965                                         int mark)
3966 {
3967         int ret;
3968         struct extent_buffer *eb;
3969         u64 start = 0;
3970         u64 end;
3971
3972         while (1) {
3973                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3974                                             mark, NULL);
3975                 if (ret)
3976                         break;
3977
3978                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3979                 while (start <= end) {
3980                         eb = btrfs_find_tree_block(root, start,
3981                                                    root->leafsize);
3982                         start += root->leafsize;
3983                         if (!eb)
3984                                 continue;
3985                         wait_on_extent_buffer_writeback(eb);
3986
3987                         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3988                                                &eb->bflags))
3989                                 clear_extent_buffer_dirty(eb);
3990                         free_extent_buffer_stale(eb);
3991                 }
3992         }
3993
3994         return ret;
3995 }
3996
3997 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3998                                        struct extent_io_tree *pinned_extents)
3999 {
4000         struct extent_io_tree *unpin;
4001         u64 start;
4002         u64 end;
4003         int ret;
4004         bool loop = true;
4005
4006         unpin = pinned_extents;
4007 again:
4008         while (1) {
4009                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4010                                             EXTENT_DIRTY, NULL);
4011                 if (ret)
4012                         break;
4013
4014                 /* honor the discard mount option during error cleanup */
4015                 if (btrfs_test_opt(root, DISCARD))
4016                         ret = btrfs_error_discard_extent(root, start,
4017                                                          end + 1 - start,
4018                                                          NULL);
4019
4020                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4021                 btrfs_error_unpin_extent_range(root, start, end);
4022                 cond_resched();
4023         }
4024
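        /*
         * pinned_extents points at one of fs_info->freed_extents[]; the two
         * trees are swapped at commit time, so make a second pass over the
         * other tree as well.
         */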
4025         if (loop) {
4026                 if (unpin == &root->fs_info->freed_extents[0])
4027                         unpin = &root->fs_info->freed_extents[1];
4028                 else
4029                         unpin = &root->fs_info->freed_extents[0];
4030                 loop = false;
4031                 goto again;
4032         }
4033
4034         return 0;
4035 }
4036
4037 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4038                                    struct btrfs_root *root)
4039 {
4040         btrfs_destroy_ordered_operations(cur_trans, root);
4041
4042         btrfs_destroy_delayed_refs(cur_trans, root);
4043
4044         cur_trans->state = TRANS_STATE_COMMIT_START;
4045         wake_up(&root->fs_info->transaction_blocked_wait);
4046
4047         cur_trans->state = TRANS_STATE_UNBLOCKED;
4048         wake_up(&root->fs_info->transaction_wait);
4049
4050         btrfs_destroy_delayed_inodes(root);
4051         btrfs_assert_delayed_root_empty(root);
4052
4053         btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
4054                                      EXTENT_DIRTY);
4055         btrfs_destroy_pinned_extent(root,
4056                                     root->fs_info->pinned_extents);
4057
4058         cur_trans->state = TRANS_STATE_COMPLETED;
4059         wake_up(&cur_trans->commit_wait);
4060
4061         /*
4062         memset(cur_trans, 0, sizeof(*cur_trans));
4063         kmem_cache_free(btrfs_transaction_cachep, cur_trans);
4064         */
4065 }
4066
4067 static int btrfs_cleanup_transaction(struct btrfs_root *root)
4068 {
4069         struct btrfs_transaction *t;
4070
4071         mutex_lock(&root->fs_info->transaction_kthread_mutex);
4072
4073         spin_lock(&root->fs_info->trans_lock);
4074         while (!list_empty(&root->fs_info->trans_list)) {
4075                 t = list_first_entry(&root->fs_info->trans_list,
4076                                      struct btrfs_transaction, list);
4077                 if (t->state >= TRANS_STATE_COMMIT_START) {
4078                         atomic_inc(&t->use_count);
4079                         spin_unlock(&root->fs_info->trans_lock);
4080                         btrfs_wait_for_commit(root, t->transid);
4081                         btrfs_put_transaction(t);
4082                         spin_lock(&root->fs_info->trans_lock);
4083                         continue;
4084                 }
4085                 if (t == root->fs_info->running_transaction) {
4086                         t->state = TRANS_STATE_COMMIT_DOING;
4087                         spin_unlock(&root->fs_info->trans_lock);
4088                         /*
4089                          * We wait for 0 num_writers since we don't hold a trans
4090                          * handle open currently for this transaction.
4091                          */
4092                         wait_event(t->writer_wait,
4093                                    atomic_read(&t->num_writers) == 0);
4094                 } else {
4095                         spin_unlock(&root->fs_info->trans_lock);
4096                 }
4097                 btrfs_cleanup_one_transaction(t, root);
4098
4099                 spin_lock(&root->fs_info->trans_lock);
4100                 if (t == root->fs_info->running_transaction)
4101                         root->fs_info->running_transaction = NULL;
4102                 list_del_init(&t->list);
4103                 spin_unlock(&root->fs_info->trans_lock);
4104
4105                 btrfs_put_transaction(t);
4106                 trace_btrfs_transaction_commit(root);
4107                 spin_lock(&root->fs_info->trans_lock);
4108         }
4109         spin_unlock(&root->fs_info->trans_lock);
4110         btrfs_destroy_all_ordered_extents(root->fs_info);
4111         btrfs_destroy_delayed_inodes(root);
4112         btrfs_assert_delayed_root_empty(root);
4113         btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
4114         btrfs_destroy_all_delalloc_inodes(root->fs_info);
4115         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
4116
4117         return 0;
4118 }
4119
4120 static struct extent_io_ops btree_extent_io_ops = {
4121         .readpage_end_io_hook = btree_readpage_end_io_hook,
4122         .readpage_io_failed_hook = btree_io_failed_hook,
4123         .submit_bio_hook = btree_submit_bio_hook,
4124         /* note we're sharing with inode.c for the merge bio hook */
4125         .merge_bio_hook = btrfs_merge_bio_hook,
4126 };