fs/btrfs/disk-io.c - btrfs: separate superblock items out of fs_info
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46
47 static struct extent_io_ops btree_extent_io_ops;
48 static void end_workqueue_fn(struct btrfs_work *work);
49 static void free_fs_root(struct btrfs_root *root);
50 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
51                                     int read_only);
52 static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
53 static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
54 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
55                                       struct btrfs_root *root);
56 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
57 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
58 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
59                                         struct extent_io_tree *dirty_pages,
60                                         int mark);
61 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
62                                        struct extent_io_tree *pinned_extents);
63 static int btrfs_cleanup_transaction(struct btrfs_root *root);
64
65 /*
66  * end_io_wq structs are used to do processing in task context when an IO is
67  * complete.  This is used during reads to verify checksums, and it is used
68  * by writes to insert metadata for new file extents after IO is complete.
69  */
70 struct end_io_wq {
71         struct bio *bio;
72         bio_end_io_t *end_io;
73         void *private;
74         struct btrfs_fs_info *info;
75         int error;
76         int metadata;
77         struct list_head list;
78         struct btrfs_work work;
79 };
80
81 /*
82  * async submit bios are used to offload expensive checksumming
83  * onto the worker threads.  They checksum file and metadata bios
84  * just before they are sent down the IO stack.
85  */
86 struct async_submit_bio {
87         struct inode *inode;
88         struct bio *bio;
89         struct list_head list;
90         extent_submit_bio_hook_t *submit_bio_start;
91         extent_submit_bio_hook_t *submit_bio_done;
92         int rw;
93         int mirror_num;
94         unsigned long bio_flags;
95         /*
96          * bio_offset is optional, can be used if the pages in the bio
97          * can't tell us where in the file the bio should go
98          */
99         u64 bio_offset;
100         struct btrfs_work work;
101 };
102
103 /*
104  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
105  * eb, the lockdep key is determined by the btrfs_root it belongs to and
106  * the level the eb occupies in the tree.
107  *
108  * Different roots are used for different purposes and may nest inside each
109  * other, so they require separate keysets.  As lockdep keys should be
110  * static, assign keysets according to the purpose of the root as indicated
111  * by btrfs_root->objectid.  This ensures that all special purpose roots
112  * have separate keysets.
113  *
114  * Lock-nesting across peer nodes is always done with the immediate parent
115  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
116  * subclass to avoid triggering lockdep warning in such cases.
117  *
118  * The key is set by the readpage_end_io_hook after the buffer has passed
119  * csum validation but before the pages are unlocked.  It is also set by
120  * btrfs_init_new_buffer on freshly allocated blocks.
121  *
122  * We also add a check to make sure the highest level of the tree is the
123  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
124  * needs update as well.
125  */
126 #ifdef CONFIG_DEBUG_LOCK_ALLOC
127 # if BTRFS_MAX_LEVEL != 8
128 #  error
129 # endif
130
131 static struct btrfs_lockdep_keyset {
132         u64                     id;             /* root objectid */
133         const char              *name_stem;     /* lock name stem */
134         char                    names[BTRFS_MAX_LEVEL + 1][20];
135         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
136 } btrfs_lockdep_keysets[] = {
137         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
138         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
139         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
140         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
141         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
142         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
143         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
144         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
145         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
146         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
147         { .id = 0,                              .name_stem = "tree"     },
148 };
149
150 void __init btrfs_init_lockdep(void)
151 {
152         int i, j;
153
154         /* initialize lockdep class names */
155         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
156                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
157
158                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
159                         snprintf(ks->names[j], sizeof(ks->names[j]),
160                                  "btrfs-%s-%02d", ks->name_stem, j);
161         }
162 }
163
164 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
165                                     int level)
166 {
167         struct btrfs_lockdep_keyset *ks;
168
169         BUG_ON(level >= ARRAY_SIZE(ks->keys));
170
171         /* find the matching keyset, id 0 is the default entry */
172         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
173                 if (ks->id == objectid)
174                         break;
175
176         lockdep_set_class_and_name(&eb->lock,
177                                    &ks->keys[level], ks->names[level]);
178 }
179
180 #endif
181
182 /*
183  * extents on the btree inode are pretty simple, there's one extent
184  * that covers the entire device
185  */
186 static struct extent_map *btree_get_extent(struct inode *inode,
187                 struct page *page, size_t pg_offset, u64 start, u64 len,
188                 int create)
189 {
190         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
191         struct extent_map *em;
192         int ret;
193
194         read_lock(&em_tree->lock);
195         em = lookup_extent_mapping(em_tree, start, len);
196         if (em) {
197                 em->bdev =
198                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
199                 read_unlock(&em_tree->lock);
200                 goto out;
201         }
202         read_unlock(&em_tree->lock);
203
204         em = alloc_extent_map();
205         if (!em) {
206                 em = ERR_PTR(-ENOMEM);
207                 goto out;
208         }
209         em->start = 0;
210         em->len = (u64)-1;
211         em->block_len = (u64)-1;
212         em->block_start = 0;
213         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
214
215         write_lock(&em_tree->lock);
216         ret = add_extent_mapping(em_tree, em);
217         if (ret == -EEXIST) {
218                 u64 failed_start = em->start;
219                 u64 failed_len = em->len;
220
221                 free_extent_map(em);
222                 em = lookup_extent_mapping(em_tree, start, len);
223                 if (em) {
224                         ret = 0;
225                 } else {
226                         em = lookup_extent_mapping(em_tree, failed_start,
227                                                    failed_len);
228                         ret = -EIO;
229                 }
230         } else if (ret) {
231                 free_extent_map(em);
232                 em = NULL;
233         }
234         write_unlock(&em_tree->lock);
235
236         if (ret)
237                 em = ERR_PTR(ret);
238 out:
239         return em;
240 }
241
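/*
 * checksum helpers: btrfs uses crc32c for both metadata and data.  The
 * final step inverts the running crc and stores it little-endian into
 * the result buffer.
 */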
242 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
243 {
244         return crc32c(seed, data, len);
245 }
246
247 void btrfs_csum_final(u32 crc, char *result)
248 {
249         put_unaligned_le32(~crc, result);
250 }
251
252 /*
253  * compute the csum for a btree block, and either verify it or write it
254  * into the csum field of the block.
255  */
256 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
257                            int verify)
258 {
259         u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
260         char *result = NULL;
261         unsigned long len;
262         unsigned long cur_len;
263         unsigned long offset = BTRFS_CSUM_SIZE;
264         char *kaddr;
265         unsigned long map_start;
266         unsigned long map_len;
267         int err;
268         u32 crc = ~(u32)0;
269         unsigned long inline_result;
270
271         len = buf->len - offset;
272         while (len > 0) {
273                 err = map_private_extent_buffer(buf, offset, 32,
274                                         &kaddr, &map_start, &map_len);
275                 if (err)
276                         return 1;
277                 cur_len = min(len, map_len - (offset - map_start));
278                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
279                                       crc, cur_len);
280                 len -= cur_len;
281                 offset += cur_len;
282         }
283         if (csum_size > sizeof(inline_result)) {
284                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
285                 if (!result)
286                         return 1;
287         } else {
288                 result = (char *)&inline_result;
289         }
290
291         btrfs_csum_final(crc, result);
292
293         if (verify) {
294                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
295                         u32 val;
296                         u32 found = 0;
297                         memcpy(&found, result, csum_size);
298
299                         read_extent_buffer(buf, &val, 0, csum_size);
300                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
301                                        "failed on %llu wanted %X found %X "
302                                        "level %d\n",
303                                        root->fs_info->sb->s_id,
304                                        (unsigned long long)buf->start, val, found,
305                                        btrfs_header_level(buf));
306                         if (result != (char *)&inline_result)
307                                 kfree(result);
308                         return 1;
309                 }
310         } else {
311                 write_extent_buffer(buf, result, 0, csum_size);
312         }
313         if (result != (char *)&inline_result)
314                 kfree(result);
315         return 0;
316 }
317
318 /*
319  * we can't consider a given block up to date unless the transid of the
320  * block matches the transid in the parent node's pointer.  This is how we
321  * detect blocks that either didn't get written at all or got written
322  * in the wrong place.
323  */
324 static int verify_parent_transid(struct extent_io_tree *io_tree,
325                                  struct extent_buffer *eb, u64 parent_transid)
326 {
327         struct extent_state *cached_state = NULL;
328         int ret;
329
330         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
331                 return 0;
332
333         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
334                          0, &cached_state, GFP_NOFS);
335         if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
336             btrfs_header_generation(eb) == parent_transid) {
337                 ret = 0;
338                 goto out;
339         }
340         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
341                        "found %llu\n",
342                        (unsigned long long)eb->start,
343                        (unsigned long long)parent_transid,
344                        (unsigned long long)btrfs_header_generation(eb));
345         ret = 1;
346         clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
347 out:
348         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
349                              &cached_state, GFP_NOFS);
350         return ret;
351 }
352
353 /*
354  * helper to read a given tree block, doing retries as required when
355  * the checksums don't match and we have alternate mirrors to try.
356  */
357 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
358                                           struct extent_buffer *eb,
359                                           u64 start, u64 parent_transid)
360 {
361         struct extent_io_tree *io_tree;
362         int ret;
363         int num_copies = 0;
364         int mirror_num = 0;
365
366         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
367         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
368         while (1) {
369                 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
370                                                btree_get_extent, mirror_num);
371                 if (!ret &&
372                     !verify_parent_transid(io_tree, eb, parent_transid))
373                         return ret;
374
375                 /*
376                  * This buffer's crc is fine, but its contents are corrupted, so
377                  * there is no reason to read the other copies, they won't be
378                  * any less wrong.
379                  */
380                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
381                         return ret;
382
383                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
384                                               eb->start, eb->len);
385                 if (num_copies == 1)
386                         return ret;
387
388                 mirror_num++;
389                 if (mirror_num > num_copies)
390                         return ret;
391         }
392         return -EIO;
393 }
394
395 /*
396  * checksum a dirty tree block before IO.  This has extra checks to make sure
397  * we only fill in the checksum field in the first page of a multi-page block
398  */
399
400 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
401 {
402         struct extent_io_tree *tree;
403         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
404         u64 found_start;
405         unsigned long len;
406         struct extent_buffer *eb;
407         int ret;
408
409         tree = &BTRFS_I(page->mapping->host)->io_tree;
410
411         if (page->private == EXTENT_PAGE_PRIVATE) {
412                 WARN_ON(1);
413                 goto out;
414         }
415         if (!page->private) {
416                 WARN_ON(1);
417                 goto out;
418         }
419         len = page->private >> 2;
420         WARN_ON(len == 0);
421
422         eb = alloc_extent_buffer(tree, start, len, page);
423         if (eb == NULL) {
424                 WARN_ON(1);
425                 goto out;
426         }
427         ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
428                                              btrfs_header_generation(eb));
429         BUG_ON(ret);
430         WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
431
432         found_start = btrfs_header_bytenr(eb);
433         if (found_start != start) {
434                 WARN_ON(1);
435                 goto err;
436         }
437         if (eb->first_page != page) {
438                 WARN_ON(1);
439                 goto err;
440         }
441         if (!PageUptodate(page)) {
442                 WARN_ON(1);
443                 goto err;
444         }
445         csum_tree_block(root, eb, 0);
446 err:
447         free_extent_buffer(eb);
448 out:
449         return 0;
450 }
451
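/*
 * check that the fsid in a tree block's header matches this filesystem
 * or one of its seed filesystems.  Returns 0 on a match, 1 otherwise.
 */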
452 static int check_tree_block_fsid(struct btrfs_root *root,
453                                  struct extent_buffer *eb)
454 {
455         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
456         u8 fsid[BTRFS_UUID_SIZE];
457         int ret = 1;
458
459         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
460                            BTRFS_FSID_SIZE);
461         while (fs_devices) {
462                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
463                         ret = 0;
464                         break;
465                 }
466                 fs_devices = fs_devices->seed;
467         }
468         return ret;
469 }
470
471 #define CORRUPT(reason, eb, root, slot)                         \
472         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
473                "root=%llu, slot=%d\n", reason,                  \
474                (unsigned long long)btrfs_header_bytenr(eb),     \
475                (unsigned long long)root->objectid, slot)
476
477 static noinline int check_leaf(struct btrfs_root *root,
478                                struct extent_buffer *leaf)
479 {
480         struct btrfs_key key;
481         struct btrfs_key leaf_key;
482         u32 nritems = btrfs_header_nritems(leaf);
483         int slot;
484
485         if (nritems == 0)
486                 return 0;
487
488         /* Check the 0 item */
489         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
490             BTRFS_LEAF_DATA_SIZE(root)) {
491                 CORRUPT("invalid item offset size pair", leaf, root, 0);
492                 return -EIO;
493         }
494
495         /*
496          * Check to make sure each item's keys are in the correct order and their
497          * offsets make sense.  We only have to loop through nritems-1 because
498          * we check the current slot against the next slot, which verifies that the
499          * next slot's offset+size makes sense and that the current slot's
500          * offset is correct.
501          */
502         for (slot = 0; slot < nritems - 1; slot++) {
503                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
504                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
505
506                 /* Make sure the keys are in the right order */
507                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
508                         CORRUPT("bad key order", leaf, root, slot);
509                         return -EIO;
510                 }
511
512                 /*
513                  * Make sure the offset and ends are right, remember that the
514                  * item data starts at the end of the leaf and grows towards the
515                  * front.
516                  */
517                 if (btrfs_item_offset_nr(leaf, slot) !=
518                         btrfs_item_end_nr(leaf, slot + 1)) {
519                         CORRUPT("slot offset bad", leaf, root, slot);
520                         return -EIO;
521                 }
522
523                 /*
524                  * Check to make sure that we don't point outside of the leaf,
525          * just in case all the items are consistent with each other, but
526                  * all point outside of the leaf.
527                  */
528                 if (btrfs_item_end_nr(leaf, slot) >
529                     BTRFS_LEAF_DATA_SIZE(root)) {
530                         CORRUPT("slot end outside of leaf", leaf, root, slot);
531                         return -EIO;
532                 }
533         }
534
535         return 0;
536 }
537
538 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
539                                struct extent_state *state)
540 {
541         struct extent_io_tree *tree;
542         u64 found_start;
543         int found_level;
544         unsigned long len;
545         struct extent_buffer *eb;
546         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
547         int ret = 0;
548
549         tree = &BTRFS_I(page->mapping->host)->io_tree;
550         if (page->private == EXTENT_PAGE_PRIVATE)
551                 goto out;
552         if (!page->private)
553                 goto out;
554
555         len = page->private >> 2;
556         WARN_ON(len == 0);
557
558         eb = alloc_extent_buffer(tree, start, len, page);
559         if (eb == NULL) {
560                 ret = -EIO;
561                 goto out;
562         }
563
564         found_start = btrfs_header_bytenr(eb);
565         if (found_start != start) {
566                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
567                                "%llu %llu\n",
568                                (unsigned long long)found_start,
569                                (unsigned long long)eb->start);
570                 ret = -EIO;
571                 goto err;
572         }
573         if (eb->first_page != page) {
574                 printk(KERN_INFO "btrfs bad first page %lu %lu\n",
575                        eb->first_page->index, page->index);
576                 WARN_ON(1);
577                 ret = -EIO;
578                 goto err;
579         }
580         if (check_tree_block_fsid(root, eb)) {
581                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
582                                (unsigned long long)eb->start);
583                 ret = -EIO;
584                 goto err;
585         }
586         found_level = btrfs_header_level(eb);
587
588         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
589                                        eb, found_level);
590
591         ret = csum_tree_block(root, eb, 1);
592         if (ret) {
593                 ret = -EIO;
594                 goto err;
595         }
596
597         /*
598          * If this is a leaf block and it is corrupt, set the corrupt bit so
599          * that we don't try and read the other copies of this block, just
600          * return -EIO.
601          */
602         if (found_level == 0 && check_leaf(root, eb)) {
603                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
604                 ret = -EIO;
605         }
606
607         end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
608         end = eb->start + end - 1;
609 err:
610         free_extent_buffer(eb);
611 out:
612         return ret;
613 }
614
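/*
 * bio end_io callback: defer the real completion work to the matching
 * end_io worker pool, based on read vs write and the metadata type.
 */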
615 static void end_workqueue_bio(struct bio *bio, int err)
616 {
617         struct end_io_wq *end_io_wq = bio->bi_private;
618         struct btrfs_fs_info *fs_info;
619
620         fs_info = end_io_wq->info;
621         end_io_wq->error = err;
622         end_io_wq->work.func = end_workqueue_fn;
623         end_io_wq->work.flags = 0;
624
625         if (bio->bi_rw & REQ_WRITE) {
626                 if (end_io_wq->metadata == 1)
627                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
628                                            &end_io_wq->work);
629                 else if (end_io_wq->metadata == 2)
630                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
631                                            &end_io_wq->work);
632                 else
633                         btrfs_queue_worker(&fs_info->endio_write_workers,
634                                            &end_io_wq->work);
635         } else {
636                 if (end_io_wq->metadata)
637                         btrfs_queue_worker(&fs_info->endio_meta_workers,
638                                            &end_io_wq->work);
639                 else
640                         btrfs_queue_worker(&fs_info->endio_workers,
641                                            &end_io_wq->work);
642         }
643 }
644
645 /*
646  * For the metadata arg you want
647  *
648  * 0 - if data
649  * 1 - if normal metadata
650  * 2 - if writing to the free space cache area
651  */
652 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
653                         int metadata)
654 {
655         struct end_io_wq *end_io_wq;
656         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
657         if (!end_io_wq)
658                 return -ENOMEM;
659
660         end_io_wq->private = bio->bi_private;
661         end_io_wq->end_io = bio->bi_end_io;
662         end_io_wq->info = info;
663         end_io_wq->error = 0;
664         end_io_wq->bio = bio;
665         end_io_wq->metadata = metadata;
666
667         bio->bi_private = end_io_wq;
668         bio->bi_end_io = end_workqueue_bio;
669         return 0;
670 }
671
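/*
 * upper bound on the number of async bio submissions we allow in flight,
 * scaled by the worker thread count and the number of open devices.
 */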
672 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
673 {
674         unsigned long limit = min_t(unsigned long,
675                                     info->workers.max_workers,
676                                     info->fs_devices->open_devices);
677         return 256 * limit;
678 }
679
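/*
 * async submission is done in two ordered phases: run_one_async_start
 * checksums the bio on a worker thread, then run_one_async_done maps and
 * submits it, in the same order the bios were queued.
 */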
680 static void run_one_async_start(struct btrfs_work *work)
681 {
682         struct async_submit_bio *async;
683
684         async = container_of(work, struct  async_submit_bio, work);
685         async->submit_bio_start(async->inode, async->rw, async->bio,
686                                async->mirror_num, async->bio_flags,
687                                async->bio_offset);
688 }
689
690 static void run_one_async_done(struct btrfs_work *work)
691 {
692         struct btrfs_fs_info *fs_info;
693         struct async_submit_bio *async;
694         int limit;
695
696         async = container_of(work, struct  async_submit_bio, work);
697         fs_info = BTRFS_I(async->inode)->root->fs_info;
698
699         limit = btrfs_async_submit_limit(fs_info);
700         limit = limit * 2 / 3;
701
702         atomic_dec(&fs_info->nr_async_submits);
703
704         if (atomic_read(&fs_info->nr_async_submits) < limit &&
705             waitqueue_active(&fs_info->async_submit_wait))
706                 wake_up(&fs_info->async_submit_wait);
707
708         async->submit_bio_done(async->inode, async->rw, async->bio,
709                                async->mirror_num, async->bio_flags,
710                                async->bio_offset);
711 }
712
713 static void run_one_async_free(struct btrfs_work *work)
714 {
715         struct async_submit_bio *async;
716
717         async = container_of(work, struct  async_submit_bio, work);
718         kfree(async);
719 }
720
721 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
722                         int rw, struct bio *bio, int mirror_num,
723                         unsigned long bio_flags,
724                         u64 bio_offset,
725                         extent_submit_bio_hook_t *submit_bio_start,
726                         extent_submit_bio_hook_t *submit_bio_done)
727 {
728         struct async_submit_bio *async;
729
730         async = kmalloc(sizeof(*async), GFP_NOFS);
731         if (!async)
732                 return -ENOMEM;
733
734         async->inode = inode;
735         async->rw = rw;
736         async->bio = bio;
737         async->mirror_num = mirror_num;
738         async->submit_bio_start = submit_bio_start;
739         async->submit_bio_done = submit_bio_done;
740
741         async->work.func = run_one_async_start;
742         async->work.ordered_func = run_one_async_done;
743         async->work.ordered_free = run_one_async_free;
744
745         async->work.flags = 0;
746         async->bio_flags = bio_flags;
747         async->bio_offset = bio_offset;
748
749         atomic_inc(&fs_info->nr_async_submits);
750
751         if (rw & REQ_SYNC)
752                 btrfs_set_work_high_prio(&async->work);
753
754         btrfs_queue_worker(&fs_info->workers, &async->work);
755
756         while (atomic_read(&fs_info->async_submit_draining) &&
757               atomic_read(&fs_info->nr_async_submits)) {
758                 wait_event(fs_info->async_submit_wait,
759                            (atomic_read(&fs_info->nr_async_submits) == 0));
760         }
761
762         return 0;
763 }
764
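/* checksum every metadata page in this bio before it is submitted */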
765 static int btree_csum_one_bio(struct bio *bio)
766 {
767         struct bio_vec *bvec = bio->bi_io_vec;
768         int bio_index = 0;
769         struct btrfs_root *root;
770
771         WARN_ON(bio->bi_vcnt <= 0);
772         while (bio_index < bio->bi_vcnt) {
773                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
774                 csum_dirty_buffer(root, bvec->bv_page);
775                 bio_index++;
776                 bvec++;
777         }
778         return 0;
779 }
780
781 static int __btree_submit_bio_start(struct inode *inode, int rw,
782                                     struct bio *bio, int mirror_num,
783                                     unsigned long bio_flags,
784                                     u64 bio_offset)
785 {
786         /*
787          * when we're called for a write, we're already in the async submission
788          * context.  Just checksum the bio; __btree_submit_bio_done submits it.
789          */
790         btree_csum_one_bio(bio);
791         return 0;
792 }
793
794 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
795                                  int mirror_num, unsigned long bio_flags,
796                                  u64 bio_offset)
797 {
798         /*
799          * when we're called for a write, we're already in the async
800          * submission context.  Just jump into btrfs_map_bio
801          */
802         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
803 }
804
805 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
806                                  int mirror_num, unsigned long bio_flags,
807                                  u64 bio_offset)
808 {
809         int ret;
810
811         ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
812                                           bio, 1);
813         BUG_ON(ret);
814
815         if (!(rw & REQ_WRITE)) {
816                 /*
817                  * called for a read, do the setup so that checksum validation
818                  * can happen in the async kernel threads
819                  */
820                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
821                                      mirror_num, 0);
822         }
823
824         /*
825          * kthread helpers are used to submit writes so that checksumming
826          * can happen in parallel across all CPUs
827          */
828         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
829                                    inode, rw, bio, mirror_num, 0,
830                                    bio_offset,
831                                    __btree_submit_bio_start,
832                                    __btree_submit_bio_done);
833 }
834
835 #ifdef CONFIG_MIGRATION
836 static int btree_migratepage(struct address_space *mapping,
837                         struct page *newpage, struct page *page)
838 {
839         /*
840          * we can't safely write a btree page from here,
841          * we haven't done the locking hook
842          */
843         if (PageDirty(page))
844                 return -EAGAIN;
845         /*
846          * Buffers may be managed in a filesystem specific way.
847          * We must have no buffers or drop them.
848          */
849         if (page_has_private(page) &&
850             !try_to_release_page(page, GFP_KERNEL))
851                 return -EAGAIN;
852         return migrate_page(mapping, newpage, page);
853 }
854 #endif
855
856 static int btree_writepage(struct page *page, struct writeback_control *wbc)
857 {
858         struct extent_io_tree *tree;
859         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
860         struct extent_buffer *eb;
861         int was_dirty;
862
863         tree = &BTRFS_I(page->mapping->host)->io_tree;
864         if (!(current->flags & PF_MEMALLOC)) {
865                 return extent_write_full_page(tree, page,
866                                               btree_get_extent, wbc);
867         }
868
869         redirty_page_for_writepage(wbc, page);
870         eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
871         WARN_ON(!eb);
872
873         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
874         if (!was_dirty) {
875                 spin_lock(&root->fs_info->delalloc_lock);
876                 root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
877                 spin_unlock(&root->fs_info->delalloc_lock);
878         }
879         free_extent_buffer(eb);
880
881         unlock_page(page);
882         return 0;
883 }
884
885 static int btree_writepages(struct address_space *mapping,
886                             struct writeback_control *wbc)
887 {
888         struct extent_io_tree *tree;
889         tree = &BTRFS_I(mapping->host)->io_tree;
890         if (wbc->sync_mode == WB_SYNC_NONE) {
891                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
892                 u64 num_dirty;
893                 unsigned long thresh = 32 * 1024 * 1024;
894
895                 if (wbc->for_kupdate)
896                         return 0;
897
898                 /* this is a bit racy, but that's ok */
899                 num_dirty = root->fs_info->dirty_metadata_bytes;
900                 if (num_dirty < thresh)
901                         return 0;
902         }
903         return extent_writepages(tree, mapping, btree_get_extent, wbc);
904 }
905
906 static int btree_readpage(struct file *file, struct page *page)
907 {
908         struct extent_io_tree *tree;
909         tree = &BTRFS_I(page->mapping->host)->io_tree;
910         return extent_read_full_page(tree, page, btree_get_extent);
911 }
912
913 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
914 {
915         struct extent_io_tree *tree;
916         struct extent_map_tree *map;
917         int ret;
918
919         if (PageWriteback(page) || PageDirty(page))
920                 return 0;
921
922         tree = &BTRFS_I(page->mapping->host)->io_tree;
923         map = &BTRFS_I(page->mapping->host)->extent_tree;
924
925         ret = try_release_extent_state(map, tree, page, gfp_flags);
926         if (!ret)
927                 return 0;
928
929         ret = try_release_extent_buffer(tree, page);
930         if (ret == 1) {
931                 ClearPagePrivate(page);
932                 set_page_private(page, 0);
933                 page_cache_release(page);
934         }
935
936         return ret;
937 }
938
939 static void btree_invalidatepage(struct page *page, unsigned long offset)
940 {
941         struct extent_io_tree *tree;
942         tree = &BTRFS_I(page->mapping->host)->io_tree;
943         extent_invalidatepage(tree, page, offset);
944         btree_releasepage(page, GFP_NOFS);
945         if (PagePrivate(page)) {
946                 printk(KERN_WARNING "btrfs warning page private not zero "
947                        "on page %llu\n", (unsigned long long)page_offset(page));
948                 ClearPagePrivate(page);
949                 set_page_private(page, 0);
950                 page_cache_release(page);
951         }
952 }
953
954 static const struct address_space_operations btree_aops = {
955         .readpage       = btree_readpage,
956         .writepage      = btree_writepage,
957         .writepages     = btree_writepages,
958         .releasepage    = btree_releasepage,
959         .invalidatepage = btree_invalidatepage,
960 #ifdef CONFIG_MIGRATION
961         .migratepage    = btree_migratepage,
962 #endif
963 };
964
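/*
 * start a readahead of a tree block.  We don't wait for the read to
 * finish or check the result.
 */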
965 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
966                          u64 parent_transid)
967 {
968         struct extent_buffer *buf = NULL;
969         struct inode *btree_inode = root->fs_info->btree_inode;
970         int ret = 0;
971
972         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
973         if (!buf)
974                 return 0;
975         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
976                                  buf, 0, 0, btree_get_extent, 0);
977         free_extent_buffer(buf);
978         return ret;
979 }
980
981 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
982                                             u64 bytenr, u32 blocksize)
983 {
984         struct inode *btree_inode = root->fs_info->btree_inode;
985         struct extent_buffer *eb;
986         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
987                                 bytenr, blocksize);
988         return eb;
989 }
990
991 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
992                                                  u64 bytenr, u32 blocksize)
993 {
994         struct inode *btree_inode = root->fs_info->btree_inode;
995         struct extent_buffer *eb;
996
997         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
998                                  bytenr, blocksize, NULL);
999         return eb;
1000 }
1001
1002
1003 int btrfs_write_tree_block(struct extent_buffer *buf)
1004 {
1005         return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
1006                                         buf->start + buf->len - 1);
1007 }
1008
1009 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1010 {
1011         return filemap_fdatawait_range(buf->first_page->mapping,
1012                                        buf->start, buf->start + buf->len - 1);
1013 }
1014
1015 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1016                                       u32 blocksize, u64 parent_transid)
1017 {
1018         struct extent_buffer *buf = NULL;
1019         int ret;
1020
1021         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1022         if (!buf)
1023                 return NULL;
1024
1025         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1026
1027         if (ret == 0)
1028                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
1029         return buf;
1030
1031 }
1032
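/*
 * if this buffer was dirtied in the currently running transaction, clear
 * its dirty bit and subtract it from the dirty_metadata_bytes accounting.
 */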
1033 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1034                      struct extent_buffer *buf)
1035 {
1036         struct inode *btree_inode = root->fs_info->btree_inode;
1037         if (btrfs_header_generation(buf) ==
1038             root->fs_info->running_transaction->transid) {
1039                 btrfs_assert_tree_locked(buf);
1040
1041                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1042                         spin_lock(&root->fs_info->delalloc_lock);
1043                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
1044                                 root->fs_info->dirty_metadata_bytes -= buf->len;
1045                         else
1046                                 WARN_ON(1);
1047                         spin_unlock(&root->fs_info->delalloc_lock);
1048                 }
1049
1050                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1051                 btrfs_set_lock_blocking(buf);
1052                 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
1053                                           buf);
1054         }
1055         return 0;
1056 }
1057
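/*
 * initialize the in-memory fields common to every btrfs_root: sizes,
 * locks, lists and log state.  The on-disk root item is filled in by
 * the callers.
 */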
1058 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1059                         u32 stripesize, struct btrfs_root *root,
1060                         struct btrfs_fs_info *fs_info,
1061                         u64 objectid)
1062 {
1063         root->node = NULL;
1064         root->commit_root = NULL;
1065         root->sectorsize = sectorsize;
1066         root->nodesize = nodesize;
1067         root->leafsize = leafsize;
1068         root->stripesize = stripesize;
1069         root->ref_cows = 0;
1070         root->track_dirty = 0;
1071         root->in_radix = 0;
1072         root->orphan_item_inserted = 0;
1073         root->orphan_cleanup_state = 0;
1074
1075         root->fs_info = fs_info;
1076         root->objectid = objectid;
1077         root->last_trans = 0;
1078         root->highest_objectid = 0;
1079         root->name = NULL;
1080         root->inode_tree = RB_ROOT;
1081         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1082         root->block_rsv = NULL;
1083         root->orphan_block_rsv = NULL;
1084
1085         INIT_LIST_HEAD(&root->dirty_list);
1086         INIT_LIST_HEAD(&root->orphan_list);
1087         INIT_LIST_HEAD(&root->root_list);
1088         spin_lock_init(&root->orphan_lock);
1089         spin_lock_init(&root->inode_lock);
1090         spin_lock_init(&root->accounting_lock);
1091         mutex_init(&root->objectid_mutex);
1092         mutex_init(&root->log_mutex);
1093         init_waitqueue_head(&root->log_writer_wait);
1094         init_waitqueue_head(&root->log_commit_wait[0]);
1095         init_waitqueue_head(&root->log_commit_wait[1]);
1096         atomic_set(&root->log_commit[0], 0);
1097         atomic_set(&root->log_commit[1], 0);
1098         atomic_set(&root->log_writers, 0);
1099         root->log_batch = 0;
1100         root->log_transid = 0;
1101         root->last_log_commit = 0;
1102         extent_io_tree_init(&root->dirty_log_pages,
1103                              fs_info->btree_inode->i_mapping);
1104
1105         memset(&root->root_key, 0, sizeof(root->root_key));
1106         memset(&root->root_item, 0, sizeof(root->root_item));
1107         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1108         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1109         root->defrag_trans_start = fs_info->generation;
1110         init_completion(&root->kobj_unregister);
1111         root->defrag_running = 0;
1112         root->root_key.objectid = objectid;
1113         root->anon_dev = 0;
1114         return 0;
1115 }
1116
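/*
 * look up the most recent root item for 'objectid' in the tree root and
 * read the corresponding tree block into root->node.
 */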
1117 static int find_and_setup_root(struct btrfs_root *tree_root,
1118                                struct btrfs_fs_info *fs_info,
1119                                u64 objectid,
1120                                struct btrfs_root *root)
1121 {
1122         int ret;
1123         u32 blocksize;
1124         u64 generation;
1125
1126         __setup_root(tree_root->nodesize, tree_root->leafsize,
1127                      tree_root->sectorsize, tree_root->stripesize,
1128                      root, fs_info, objectid);
1129         ret = btrfs_find_last_root(tree_root, objectid,
1130                                    &root->root_item, &root->root_key);
1131         if (ret > 0)
1132                 return -ENOENT;
1133         BUG_ON(ret);
1134
1135         generation = btrfs_root_generation(&root->root_item);
1136         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1137         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1138                                      blocksize, generation);
1139         if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
1140                 free_extent_buffer(root->node);
1141                 return -EIO;
1142         }
1143         root->commit_root = btrfs_root_node(root);
1144         return 0;
1145 }
1146
1147 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1148                                          struct btrfs_fs_info *fs_info)
1149 {
1150         struct btrfs_root *root;
1151         struct btrfs_root *tree_root = fs_info->tree_root;
1152         struct extent_buffer *leaf;
1153
1154         root = kzalloc(sizeof(*root), GFP_NOFS);
1155         if (!root)
1156                 return ERR_PTR(-ENOMEM);
1157
1158         __setup_root(tree_root->nodesize, tree_root->leafsize,
1159                      tree_root->sectorsize, tree_root->stripesize,
1160                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1161
1162         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1163         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1164         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1165         /*
1166          * log trees do not get reference counted because they go away
1167          * before a real commit is actually done.  They do store pointers
1168          * to file data extents, and those reference counts still get
1169          * updated (along with back refs to the log tree).
1170          */
1171         root->ref_cows = 0;
1172
1173         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1174                                       BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
1175         if (IS_ERR(leaf)) {
1176                 kfree(root);
1177                 return ERR_CAST(leaf);
1178         }
1179
1180         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1181         btrfs_set_header_bytenr(leaf, leaf->start);
1182         btrfs_set_header_generation(leaf, trans->transid);
1183         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1184         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1185         root->node = leaf;
1186
1187         write_extent_buffer(root->node, root->fs_info->fsid,
1188                             (unsigned long)btrfs_header_fsid(root->node),
1189                             BTRFS_FSID_SIZE);
1190         btrfs_mark_buffer_dirty(root->node);
1191         btrfs_tree_unlock(root->node);
1192         return root;
1193 }
1194
1195 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1196                              struct btrfs_fs_info *fs_info)
1197 {
1198         struct btrfs_root *log_root;
1199
1200         log_root = alloc_log_tree(trans, fs_info);
1201         if (IS_ERR(log_root))
1202                 return PTR_ERR(log_root);
1203         WARN_ON(fs_info->log_root_tree);
1204         fs_info->log_root_tree = log_root;
1205         return 0;
1206 }
1207
1208 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1209                        struct btrfs_root *root)
1210 {
1211         struct btrfs_root *log_root;
1212         struct btrfs_inode_item *inode_item;
1213
1214         log_root = alloc_log_tree(trans, root->fs_info);
1215         if (IS_ERR(log_root))
1216                 return PTR_ERR(log_root);
1217
1218         log_root->last_trans = trans->transid;
1219         log_root->root_key.offset = root->root_key.objectid;
1220
1221         inode_item = &log_root->root_item.inode;
1222         inode_item->generation = cpu_to_le64(1);
1223         inode_item->size = cpu_to_le64(3);
1224         inode_item->nlink = cpu_to_le32(1);
1225         inode_item->nbytes = cpu_to_le64(root->leafsize);
1226         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1227
1228         btrfs_set_root_node(&log_root->root_item, log_root->node);
1229
1230         WARN_ON(root->log_root);
1231         root->log_root = log_root;
1232         root->log_transid = 0;
1233         root->last_log_commit = 0;
1234         return 0;
1235 }
1236
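/*
 * read a root from the root tree given its key.  The result is not
 * inserted into the fs_roots_radix cache; callers that want caching go
 * through btrfs_read_fs_root_no_name().
 */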
1237 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1238                                                struct btrfs_key *location)
1239 {
1240         struct btrfs_root *root;
1241         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1242         struct btrfs_path *path;
1243         struct extent_buffer *l;
1244         u64 generation;
1245         u32 blocksize;
1246         int ret = 0;
1247
1248         root = kzalloc(sizeof(*root), GFP_NOFS);
1249         if (!root)
1250                 return ERR_PTR(-ENOMEM);
1251         if (location->offset == (u64)-1) {
1252                 ret = find_and_setup_root(tree_root, fs_info,
1253                                           location->objectid, root);
1254                 if (ret) {
1255                         kfree(root);
1256                         return ERR_PTR(ret);
1257                 }
1258                 goto out;
1259         }
1260
1261         __setup_root(tree_root->nodesize, tree_root->leafsize,
1262                      tree_root->sectorsize, tree_root->stripesize,
1263                      root, fs_info, location->objectid);
1264
1265         path = btrfs_alloc_path();
1266         if (!path) {
1267                 kfree(root);
1268                 return ERR_PTR(-ENOMEM);
1269         }
1270         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1271         if (ret == 0) {
1272                 l = path->nodes[0];
1273                 read_extent_buffer(l, &root->root_item,
1274                                 btrfs_item_ptr_offset(l, path->slots[0]),
1275                                 sizeof(root->root_item));
1276                 memcpy(&root->root_key, location, sizeof(*location));
1277         }
1278         btrfs_free_path(path);
1279         if (ret) {
1280                 kfree(root);
1281                 if (ret > 0)
1282                         ret = -ENOENT;
1283                 return ERR_PTR(ret);
1284         }
1285
1286         generation = btrfs_root_generation(&root->root_item);
1287         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1288         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1289                                      blocksize, generation);
1290         root->commit_root = btrfs_root_node(root);
1291         BUG_ON(!root->node);
1292 out:
1293         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1294                 root->ref_cows = 1;
1295                 btrfs_check_and_init_root_item(&root->root_item);
1296         }
1297
1298         return root;
1299 }
1300
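/*
 * return one of the well known roots directly, otherwise look the root up
 * in the radix cache or read it from disk, set up its free inode cache and
 * anonymous bdev, and insert it into the cache.
 */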
1301 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1302                                               struct btrfs_key *location)
1303 {
1304         struct btrfs_root *root;
1305         int ret;
1306
1307         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1308                 return fs_info->tree_root;
1309         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1310                 return fs_info->extent_root;
1311         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1312                 return fs_info->chunk_root;
1313         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1314                 return fs_info->dev_root;
1315         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1316                 return fs_info->csum_root;
1317 again:
1318         spin_lock(&fs_info->fs_roots_radix_lock);
1319         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1320                                  (unsigned long)location->objectid);
1321         spin_unlock(&fs_info->fs_roots_radix_lock);
1322         if (root)
1323                 return root;
1324
1325         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1326         if (IS_ERR(root))
1327                 return root;
1328
1329         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1330         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1331                                         GFP_NOFS);
1332         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1333                 ret = -ENOMEM;
1334                 goto fail;
1335         }
1336
1337         btrfs_init_free_ino_ctl(root);
1338         mutex_init(&root->fs_commit_mutex);
1339         spin_lock_init(&root->cache_lock);
1340         init_waitqueue_head(&root->cache_wait);
1341
1342         ret = get_anon_bdev(&root->anon_dev);
1343         if (ret)
1344                 goto fail;
1345
1346         if (btrfs_root_refs(&root->root_item) == 0) {
1347                 ret = -ENOENT;
1348                 goto fail;
1349         }
1350
1351         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1352         if (ret < 0)
1353                 goto fail;
1354         if (ret == 0)
1355                 root->orphan_item_inserted = 1;
1356
1357         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1358         if (ret)
1359                 goto fail;
1360
1361         spin_lock(&fs_info->fs_roots_radix_lock);
1362         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1363                                 (unsigned long)root->root_key.objectid,
1364                                 root);
1365         if (ret == 0)
1366                 root->in_radix = 1;
1367
1368         spin_unlock(&fs_info->fs_roots_radix_lock);
1369         radix_tree_preload_end();
1370         if (ret) {
1371                 if (ret == -EEXIST) {
1372                         free_fs_root(root);
1373                         goto again;
1374                 }
1375                 goto fail;
1376         }
1377
1378         ret = btrfs_find_dead_roots(fs_info->tree_root,
1379                                     root->root_key.objectid);
1380         WARN_ON(ret);
1381         return root;
1382 fail:
1383         free_fs_root(root);
1384         return ERR_PTR(ret);
1385 }
1386
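/*
 * bdi congestion callback: report congestion if any of the underlying
 * devices is congested.
 */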
1387 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1388 {
1389         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1390         int ret = 0;
1391         struct btrfs_device *device;
1392         struct backing_dev_info *bdi;
1393
1394         rcu_read_lock();
1395         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1396                 if (!device->bdev)
1397                         continue;
1398                 bdi = blk_get_backing_dev_info(device->bdev);
1399                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1400                         ret = 1;
1401                         break;
1402                 }
1403         }
1404         rcu_read_unlock();
1405         return ret;
1406 }
1407
1408 /*
1409  * If this fails, caller must call bdi_destroy() to get rid of the
1410  * bdi again.
1411  */
1412 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1413 {
1414         int err;
1415
1416         bdi->capabilities = BDI_CAP_MAP_COPY;
1417         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1418         if (err)
1419                 return err;
1420
1421         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1422         bdi->congested_fn       = btrfs_congested_fn;
1423         bdi->congested_data     = info;
1424         return 0;
1425 }
1426
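/*
 * metadata checksums cover whole tree blocks, which may span several
 * pages.  Check whether the last buffer touched by this bio is entirely
 * contained in it or otherwise already up to date.
 */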
1427 static int bio_ready_for_csum(struct bio *bio)
1428 {
1429         u64 length = 0;
1430         u64 buf_len = 0;
1431         u64 start = 0;
1432         struct page *page;
1433         struct extent_io_tree *io_tree = NULL;
1434         struct bio_vec *bvec;
1435         int i;
1436         int ret;
1437
1438         bio_for_each_segment(bvec, bio, i) {
1439                 page = bvec->bv_page;
1440                 if (page->private == EXTENT_PAGE_PRIVATE) {
1441                         length += bvec->bv_len;
1442                         continue;
1443                 }
1444                 if (!page->private) {
1445                         length += bvec->bv_len;
1446                         continue;
1447                 }
1448                 length = bvec->bv_len;
1449                 buf_len = page->private >> 2;
1450                 start = page_offset(page) + bvec->bv_offset;
1451                 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1452         }
1453         /* are we fully contained in this bio? */
1454         if (buf_len <= length)
1455                 return 1;
1456
1457         ret = extent_range_uptodate(io_tree, start + length,
1458                                     start + buf_len - 1);
1459         return ret;
1460 }
1461
1462 /*
1463  * called by the kthread helper functions to finally call the bio end_io
1464  * functions.  This is where read checksum verification actually happens
1465  */
1466 static void end_workqueue_fn(struct btrfs_work *work)
1467 {
1468         struct bio *bio;
1469         struct end_io_wq *end_io_wq;
1470         struct btrfs_fs_info *fs_info;
1471         int error;
1472
1473         end_io_wq = container_of(work, struct end_io_wq, work);
1474         bio = end_io_wq->bio;
1475         fs_info = end_io_wq->info;
1476
1477         /* metadata bio reads are special because the whole tree block must
1478          * be checksummed at once.  This makes sure the entire block is in
1479          * RAM and up to date before we try to verify the checksum.  For
1480          * blocksize <= pagesize, this is basically a no-op.
1481          */
1482         if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
1483             !bio_ready_for_csum(bio)) {
1484                 btrfs_queue_worker(&fs_info->endio_meta_workers,
1485                                    &end_io_wq->work);
1486                 return;
1487         }
1488         error = end_io_wq->error;
1489         bio->bi_private = end_io_wq->private;
1490         bio->bi_end_io = end_io_wq->end_io;
1491         kfree(end_io_wq);
1492         bio_endio(bio, error);
1493 }
1494
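/*
 * Background cleaner thread: on a writable filesystem it runs the
 * delayed iputs, removes old/dead snapshots and kicks off inode defrag
 * work, then sleeps until it is woken or asked to stop.
 */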
1495 static int cleaner_kthread(void *arg)
1496 {
1497         struct btrfs_root *root = arg;
1498
1499         do {
1500                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1501
1502                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1503                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1504                         btrfs_run_delayed_iputs(root);
1505                         btrfs_clean_old_snapshots(root);
1506                         mutex_unlock(&root->fs_info->cleaner_mutex);
1507                         btrfs_run_defrag_inodes(root->fs_info);
1508                 }
1509
1510                 if (freezing(current)) {
1511                         refrigerator();
1512                 } else {
1513                         set_current_state(TASK_INTERRUPTIBLE);
1514                         if (!kthread_should_stop())
1515                                 schedule();
1516                         __set_current_state(TASK_RUNNING);
1517                 }
1518         } while (!kthread_should_stop());
1519         return 0;
1520 }
1521
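/*
 * Background transaction thread: wakes up periodically and commits the
 * running transaction once it is blocked or roughly 30 seconds old,
 * then wakes the cleaner thread and goes back to sleep.
 */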
1522 static int transaction_kthread(void *arg)
1523 {
1524         struct btrfs_root *root = arg;
1525         struct btrfs_trans_handle *trans;
1526         struct btrfs_transaction *cur;
1527         u64 transid;
1528         unsigned long now;
1529         unsigned long delay;
1530         int ret;
1531
1532         do {
1533                 delay = HZ * 30;
1534                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1535                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1536
1537                 spin_lock(&root->fs_info->trans_lock);
1538                 cur = root->fs_info->running_transaction;
1539                 if (!cur) {
1540                         spin_unlock(&root->fs_info->trans_lock);
1541                         goto sleep;
1542                 }
1543
1544                 now = get_seconds();
1545                 if (!cur->blocked &&
1546                     (now < cur->start_time || now - cur->start_time < 30)) {
1547                         spin_unlock(&root->fs_info->trans_lock);
1548                         delay = HZ * 5;
1549                         goto sleep;
1550                 }
1551                 transid = cur->transid;
1552                 spin_unlock(&root->fs_info->trans_lock);
1553
1554                 trans = btrfs_join_transaction(root);
1555                 BUG_ON(IS_ERR(trans));
1556                 if (transid == trans->transid) {
1557                         ret = btrfs_commit_transaction(trans, root);
1558                         BUG_ON(ret);
1559                 } else {
1560                         btrfs_end_transaction(trans, root);
1561                 }
1562 sleep:
1563                 wake_up_process(root->fs_info->cleaner_kthread);
1564                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1565
1566                 if (freezing(current)) {
1567                         refrigerator();
1568                 } else {
1569                         set_current_state(TASK_INTERRUPTIBLE);
1570                         if (!kthread_should_stop() &&
1571                             !btrfs_transaction_blocked(root->fs_info))
1572                                 schedule_timeout(delay);
1573                         __set_current_state(TASK_RUNNING);
1574                 }
1575         } while (!kthread_should_stop());
1576         return 0;
1577 }
1578
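/*
 * Mount-time entry point: allocate and wire up fs_info and the tree
 * roots, read and validate the super block, start the worker pools and
 * kthreads, read the chunk, extent, dev and csum trees, replay the log
 * tree if one exists, and return the tree root (or an ERR_PTR).
 */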
1579 struct btrfs_root *open_ctree(struct super_block *sb,
1580                               struct btrfs_fs_devices *fs_devices,
1581                               char *options)
1582 {
1583         u32 sectorsize;
1584         u32 nodesize;
1585         u32 leafsize;
1586         u32 blocksize;
1587         u32 stripesize;
1588         u64 generation;
1589         u64 features;
1590         struct btrfs_key location;
1591         struct buffer_head *bh;
1592         struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1593                                                  GFP_NOFS);
1594         struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1595                                                  GFP_NOFS);
1596         struct btrfs_root *tree_root = btrfs_sb(sb);
1597         struct btrfs_fs_info *fs_info = NULL;
1598         struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1599                                                 GFP_NOFS);
1600         struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1601                                               GFP_NOFS);
1602         struct btrfs_root *log_tree_root;
1603
1604         int ret;
1605         int err = -EINVAL;
1606
1607         struct btrfs_super_block *disk_super;
1608
1609         if (!extent_root || !tree_root || !tree_root->fs_info ||
1610             !chunk_root || !dev_root || !csum_root) {
1611                 err = -ENOMEM;
1612                 goto fail;
1613         }
1614         fs_info = tree_root->fs_info;
1615
1616         ret = init_srcu_struct(&fs_info->subvol_srcu);
1617         if (ret) {
1618                 err = ret;
1619                 goto fail;
1620         }
1621
1622         ret = setup_bdi(fs_info, &fs_info->bdi);
1623         if (ret) {
1624                 err = ret;
1625                 goto fail_srcu;
1626         }
1627
1628         fs_info->btree_inode = new_inode(sb);
1629         if (!fs_info->btree_inode) {
1630                 err = -ENOMEM;
1631                 goto fail_bdi;
1632         }
1633
1634         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1635
1636         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1637         INIT_LIST_HEAD(&fs_info->trans_list);
1638         INIT_LIST_HEAD(&fs_info->dead_roots);
1639         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1640         INIT_LIST_HEAD(&fs_info->hashers);
1641         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1642         INIT_LIST_HEAD(&fs_info->ordered_operations);
1643         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1644         spin_lock_init(&fs_info->delalloc_lock);
1645         spin_lock_init(&fs_info->trans_lock);
1646         spin_lock_init(&fs_info->ref_cache_lock);
1647         spin_lock_init(&fs_info->fs_roots_radix_lock);
1648         spin_lock_init(&fs_info->delayed_iput_lock);
1649         spin_lock_init(&fs_info->defrag_inodes_lock);
1650         spin_lock_init(&fs_info->free_chunk_lock);
1651         mutex_init(&fs_info->reloc_mutex);
1652
1653         init_completion(&fs_info->kobj_unregister);
1654         fs_info->tree_root = tree_root;
1655         fs_info->extent_root = extent_root;
1656         fs_info->csum_root = csum_root;
1657         fs_info->chunk_root = chunk_root;
1658         fs_info->dev_root = dev_root;
1659         fs_info->fs_devices = fs_devices;
1660         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1661         INIT_LIST_HEAD(&fs_info->space_info);
1662         btrfs_mapping_init(&fs_info->mapping_tree);
1663         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1664         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1665         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1666         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1667         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1668         atomic_set(&fs_info->nr_async_submits, 0);
1669         atomic_set(&fs_info->async_delalloc_pages, 0);
1670         atomic_set(&fs_info->async_submit_draining, 0);
1671         atomic_set(&fs_info->nr_async_bios, 0);
1672         atomic_set(&fs_info->defrag_running, 0);
1673         fs_info->sb = sb;
1674         fs_info->max_inline = 8192 * 1024;
1675         fs_info->metadata_ratio = 0;
1676         fs_info->defrag_inodes = RB_ROOT;
1677         fs_info->trans_no_join = 0;
1678         fs_info->free_chunk_space = 0;
1679
1680         fs_info->thread_pool_size = min_t(unsigned long,
1681                                           num_online_cpus() + 2, 8);
1682
1683         INIT_LIST_HEAD(&fs_info->ordered_extents);
1684         spin_lock_init(&fs_info->ordered_extent_lock);
1685         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1686                                         GFP_NOFS);
1687         if (!fs_info->delayed_root) {
1688                 err = -ENOMEM;
1689                 goto fail_iput;
1690         }
1691         btrfs_init_delayed_root(fs_info->delayed_root);
1692
1693         mutex_init(&fs_info->scrub_lock);
1694         atomic_set(&fs_info->scrubs_running, 0);
1695         atomic_set(&fs_info->scrub_pause_req, 0);
1696         atomic_set(&fs_info->scrubs_paused, 0);
1697         atomic_set(&fs_info->scrub_cancel_req, 0);
1698         init_waitqueue_head(&fs_info->scrub_pause_wait);
1699         init_rwsem(&fs_info->scrub_super_lock);
1700         fs_info->scrub_workers_refcnt = 0;
1701
1702         sb->s_blocksize = 4096;
1703         sb->s_blocksize_bits = blksize_bits(4096);
1704         sb->s_bdi = &fs_info->bdi;
1705
1706         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1707         fs_info->btree_inode->i_nlink = 1;
1708         /*
1709          * we set the i_size on the btree inode to the maximum possible
1710          * offset.  The real end of the address space is determined by the
1711          * sizes of all the devices in the system.
1712          */
1713         fs_info->btree_inode->i_size = OFFSET_MAX;
1714         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1715         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1716
1717         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1718         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1719                              fs_info->btree_inode->i_mapping);
1720         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
1721
1722         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1723
1724         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1725         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1726                sizeof(struct btrfs_key));
1727         BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1728         insert_inode_hash(fs_info->btree_inode);
1729
1730         spin_lock_init(&fs_info->block_group_cache_lock);
1731         fs_info->block_group_cache_tree = RB_ROOT;
1732
1733         extent_io_tree_init(&fs_info->freed_extents[0],
1734                              fs_info->btree_inode->i_mapping);
1735         extent_io_tree_init(&fs_info->freed_extents[1],
1736                              fs_info->btree_inode->i_mapping);
1737         fs_info->pinned_extents = &fs_info->freed_extents[0];
1738         fs_info->do_barriers = 1;
1739
1740
1741         mutex_init(&fs_info->ordered_operations_mutex);
1742         mutex_init(&fs_info->tree_log_mutex);
1743         mutex_init(&fs_info->chunk_mutex);
1744         mutex_init(&fs_info->transaction_kthread_mutex);
1745         mutex_init(&fs_info->cleaner_mutex);
1746         mutex_init(&fs_info->volume_mutex);
1747         init_rwsem(&fs_info->extent_commit_sem);
1748         init_rwsem(&fs_info->cleanup_work_sem);
1749         init_rwsem(&fs_info->subvol_sem);
1750
1751         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1752         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1753
1754         init_waitqueue_head(&fs_info->transaction_throttle);
1755         init_waitqueue_head(&fs_info->transaction_wait);
1756         init_waitqueue_head(&fs_info->transaction_blocked_wait);
1757         init_waitqueue_head(&fs_info->async_submit_wait);
1758
1759         __setup_root(4096, 4096, 4096, 4096, tree_root,
1760                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1761
1762         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1763         if (!bh) {
1764                 err = -EINVAL;
1765                 goto fail_alloc;
1766         }
1767
1768         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
1769         memcpy(fs_info->super_for_commit, fs_info->super_copy,
1770                sizeof(*fs_info->super_for_commit));
1771         brelse(bh);
1772
1773         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
1774
1775         disk_super = fs_info->super_copy;
1776         if (!btrfs_super_root(disk_super))
1777                 goto fail_alloc;
1778
1779         /* check FS state, whether FS is broken. */
1780         fs_info->fs_state |= btrfs_super_flags(disk_super);
1781
1782         btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
1783
1784         /*
1785          * In the long term, we'll store the compression type in the super
1786          * block, and it'll be used for per file compression control.
1787          */
1788         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
1789
1790         ret = btrfs_parse_options(tree_root, options);
1791         if (ret) {
1792                 err = ret;
1793                 goto fail_alloc;
1794         }
1795
1796         features = btrfs_super_incompat_flags(disk_super) &
1797                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1798         if (features) {
1799                 printk(KERN_ERR "BTRFS: couldn't mount because of "
1800                        "unsupported optional features (%Lx).\n",
1801                        (unsigned long long)features);
1802                 err = -EINVAL;
1803                 goto fail_alloc;
1804         }
1805
1806         features = btrfs_super_incompat_flags(disk_super);
1807         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1808         if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
1809                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
1810         btrfs_set_super_incompat_flags(disk_super, features);
1811
1812         features = btrfs_super_compat_ro_flags(disk_super) &
1813                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1814         if (!(sb->s_flags & MS_RDONLY) && features) {
1815                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1816                        "unsupported option features (%Lx).\n",
1817                        (unsigned long long)features);
1818                 err = -EINVAL;
1819                 goto fail_alloc;
1820         }
1821
1822         btrfs_init_workers(&fs_info->generic_worker,
1823                            "genwork", 1, NULL);
1824
1825         btrfs_init_workers(&fs_info->workers, "worker",
1826                            fs_info->thread_pool_size,
1827                            &fs_info->generic_worker);
1828
1829         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1830                            fs_info->thread_pool_size,
1831                            &fs_info->generic_worker);
1832
1833         btrfs_init_workers(&fs_info->submit_workers, "submit",
1834                            min_t(u64, fs_devices->num_devices,
1835                            fs_info->thread_pool_size),
1836                            &fs_info->generic_worker);
1837
1838         btrfs_init_workers(&fs_info->caching_workers, "cache",
1839                            2, &fs_info->generic_worker);
1840
1841         /* a higher idle thresh on the submit workers makes it much more
1842          * likely that bios will be sent down in a sane order to the
1843          * devices.
1844          */
1845         fs_info->submit_workers.idle_thresh = 64;
1846
1847         fs_info->workers.idle_thresh = 16;
1848         fs_info->workers.ordered = 1;
1849
1850         fs_info->delalloc_workers.idle_thresh = 2;
1851         fs_info->delalloc_workers.ordered = 1;
1852
1853         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
1854                            &fs_info->generic_worker);
1855         btrfs_init_workers(&fs_info->endio_workers, "endio",
1856                            fs_info->thread_pool_size,
1857                            &fs_info->generic_worker);
1858         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1859                            fs_info->thread_pool_size,
1860                            &fs_info->generic_worker);
1861         btrfs_init_workers(&fs_info->endio_meta_write_workers,
1862                            "endio-meta-write", fs_info->thread_pool_size,
1863                            &fs_info->generic_worker);
1864         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1865                            fs_info->thread_pool_size,
1866                            &fs_info->generic_worker);
1867         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1868                            1, &fs_info->generic_worker);
1869         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
1870                            fs_info->thread_pool_size,
1871                            &fs_info->generic_worker);
1872
1873         /*
1874          * endios are largely parallel and should have a very
1875          * low idle thresh
1876          */
1877         fs_info->endio_workers.idle_thresh = 4;
1878         fs_info->endio_meta_workers.idle_thresh = 4;
1879
1880         fs_info->endio_write_workers.idle_thresh = 2;
1881         fs_info->endio_meta_write_workers.idle_thresh = 2;
1882
1883         btrfs_start_workers(&fs_info->workers, 1);
1884         btrfs_start_workers(&fs_info->generic_worker, 1);
1885         btrfs_start_workers(&fs_info->submit_workers, 1);
1886         btrfs_start_workers(&fs_info->delalloc_workers, 1);
1887         btrfs_start_workers(&fs_info->fixup_workers, 1);
1888         btrfs_start_workers(&fs_info->endio_workers, 1);
1889         btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1890         btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1891         btrfs_start_workers(&fs_info->endio_write_workers, 1);
1892         btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
1893         btrfs_start_workers(&fs_info->delayed_workers, 1);
1894         btrfs_start_workers(&fs_info->caching_workers, 1);
1895
1896         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1897         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1898                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1899
1900         nodesize = btrfs_super_nodesize(disk_super);
1901         leafsize = btrfs_super_leafsize(disk_super);
1902         sectorsize = btrfs_super_sectorsize(disk_super);
1903         stripesize = btrfs_super_stripesize(disk_super);
1904         tree_root->nodesize = nodesize;
1905         tree_root->leafsize = leafsize;
1906         tree_root->sectorsize = sectorsize;
1907         tree_root->stripesize = stripesize;
1908
1909         sb->s_blocksize = sectorsize;
1910         sb->s_blocksize_bits = blksize_bits(sectorsize);
1911
1912         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1913                     sizeof(disk_super->magic))) {
1914                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1915                 goto fail_sb_buffer;
1916         }
1917
1918         mutex_lock(&fs_info->chunk_mutex);
1919         ret = btrfs_read_sys_array(tree_root);
1920         mutex_unlock(&fs_info->chunk_mutex);
1921         if (ret) {
1922                 printk(KERN_WARNING "btrfs: failed to read the system "
1923                        "array on %s\n", sb->s_id);
1924                 goto fail_sb_buffer;
1925         }
1926
1927         blocksize = btrfs_level_size(tree_root,
1928                                      btrfs_super_chunk_root_level(disk_super));
1929         generation = btrfs_super_chunk_root_generation(disk_super);
1930
1931         __setup_root(nodesize, leafsize, sectorsize, stripesize,
1932                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1933
1934         chunk_root->node = read_tree_block(chunk_root,
1935                                            btrfs_super_chunk_root(disk_super),
1936                                            blocksize, generation);
1937         BUG_ON(!chunk_root->node);
1938         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1939                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1940                        sb->s_id);
1941                 goto fail_chunk_root;
1942         }
1943         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1944         chunk_root->commit_root = btrfs_root_node(chunk_root);
1945
1946         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1947            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1948            BTRFS_UUID_SIZE);
1949
1950         mutex_lock(&fs_info->chunk_mutex);
1951         ret = btrfs_read_chunk_tree(chunk_root);
1952         mutex_unlock(&fs_info->chunk_mutex);
1953         if (ret) {
1954                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1955                        sb->s_id);
1956                 goto fail_chunk_root;
1957         }
1958
1959         btrfs_close_extra_devices(fs_devices);
1960
1961         blocksize = btrfs_level_size(tree_root,
1962                                      btrfs_super_root_level(disk_super));
1963         generation = btrfs_super_generation(disk_super);
1964
1965         tree_root->node = read_tree_block(tree_root,
1966                                           btrfs_super_root(disk_super),
1967                                           blocksize, generation);
1968         if (!tree_root->node)
1969                 goto fail_chunk_root;
1970         if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
1971                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
1972                        sb->s_id);
1973                 goto fail_tree_root;
1974         }
1975         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
1976         tree_root->commit_root = btrfs_root_node(tree_root);
1977
1978         ret = find_and_setup_root(tree_root, fs_info,
1979                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1980         if (ret)
1981                 goto fail_tree_root;
1982         extent_root->track_dirty = 1;
1983
1984         ret = find_and_setup_root(tree_root, fs_info,
1985                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
1986         if (ret)
1987                 goto fail_extent_root;
1988         dev_root->track_dirty = 1;
1989
1990         ret = find_and_setup_root(tree_root, fs_info,
1991                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
1992         if (ret)
1993                 goto fail_dev_root;
1994
1995         csum_root->track_dirty = 1;
1996
1997         fs_info->generation = generation;
1998         fs_info->last_trans_committed = generation;
1999         fs_info->data_alloc_profile = (u64)-1;
2000         fs_info->metadata_alloc_profile = (u64)-1;
2001         fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
2002
2003         ret = btrfs_init_space_info(fs_info);
2004         if (ret) {
2005                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2006                 goto fail_block_groups;
2007         }
2008
2009         ret = btrfs_read_block_groups(extent_root);
2010         if (ret) {
2011                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2012                 goto fail_block_groups;
2013         }
2014
2015         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2016                                                "btrfs-cleaner");
2017         if (IS_ERR(fs_info->cleaner_kthread))
2018                 goto fail_block_groups;
2019
2020         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2021                                                    tree_root,
2022                                                    "btrfs-transaction");
2023         if (IS_ERR(fs_info->transaction_kthread))
2024                 goto fail_cleaner;
2025
2026         if (!btrfs_test_opt(tree_root, SSD) &&
2027             !btrfs_test_opt(tree_root, NOSSD) &&
2028             !fs_info->fs_devices->rotating) {
2029                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2030                        "mode\n");
2031                 btrfs_set_opt(fs_info->mount_opt, SSD);
2032         }
2033
2034         /* do not make disk changes in a broken FS */
2035         if (btrfs_super_log_root(disk_super) != 0 &&
2036             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2037                 u64 bytenr = btrfs_super_log_root(disk_super);
2038
2039                 if (fs_devices->rw_devices == 0) {
2040                         printk(KERN_WARNING "Btrfs log replay required "
2041                                "on RO media\n");
2042                         err = -EIO;
2043                         goto fail_trans_kthread;
2044                 }
2045                 blocksize =
2046                      btrfs_level_size(tree_root,
2047                                       btrfs_super_log_root_level(disk_super));
2048
2049                 log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
2050                 if (!log_tree_root) {
2051                         err = -ENOMEM;
2052                         goto fail_trans_kthread;
2053                 }
2054
2055                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2056                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2057
2058                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2059                                                       blocksize,
2060                                                       generation + 1);
2061                 ret = btrfs_recover_log_trees(log_tree_root);
2062                 BUG_ON(ret);
2063
2064                 if (sb->s_flags & MS_RDONLY) {
2065                         ret =  btrfs_commit_super(tree_root);
2066                         BUG_ON(ret);
2067                 }
2068         }
2069
2070         ret = btrfs_find_orphan_roots(tree_root);
2071         BUG_ON(ret);
2072
2073         if (!(sb->s_flags & MS_RDONLY)) {
2074                 ret = btrfs_cleanup_fs_roots(fs_info);
2075                 BUG_ON(ret);
2076
2077                 ret = btrfs_recover_relocation(tree_root);
2078                 if (ret < 0) {
2079                         printk(KERN_WARNING
2080                                "btrfs: failed to recover relocation\n");
2081                         err = -EINVAL;
2082                         goto fail_trans_kthread;
2083                 }
2084         }
2085
2086         location.objectid = BTRFS_FS_TREE_OBJECTID;
2087         location.type = BTRFS_ROOT_ITEM_KEY;
2088         location.offset = (u64)-1;
2089
2090         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2091         if (!fs_info->fs_root)
2092                 goto fail_trans_kthread;
2093         if (IS_ERR(fs_info->fs_root)) {
2094                 err = PTR_ERR(fs_info->fs_root);
2095                 goto fail_trans_kthread;
2096         }
2097
2098         if (!(sb->s_flags & MS_RDONLY)) {
2099                 down_read(&fs_info->cleanup_work_sem);
2100                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2101                 if (!err)
2102                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2103                 up_read(&fs_info->cleanup_work_sem);
2104                 if (err) {
2105                         close_ctree(tree_root);
2106                         return ERR_PTR(err);
2107                 }
2108         }
2109
2110         return tree_root;
2111
2112 fail_trans_kthread:
2113         kthread_stop(fs_info->transaction_kthread);
2114 fail_cleaner:
2115         kthread_stop(fs_info->cleaner_kthread);
2116
2117         /*
2118          * make sure we're done with the btree inode before we stop our
2119          * kthreads
2120          */
2121         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2122         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2123
2124 fail_block_groups:
2125         btrfs_free_block_groups(fs_info);
2126         free_extent_buffer(csum_root->node);
2127         free_extent_buffer(csum_root->commit_root);
2128 fail_dev_root:
2129         free_extent_buffer(dev_root->node);
2130         free_extent_buffer(dev_root->commit_root);
2131 fail_extent_root:
2132         free_extent_buffer(extent_root->node);
2133         free_extent_buffer(extent_root->commit_root);
2134 fail_tree_root:
2135         free_extent_buffer(tree_root->node);
2136         free_extent_buffer(tree_root->commit_root);
2137 fail_chunk_root:
2138         free_extent_buffer(chunk_root->node);
2139         free_extent_buffer(chunk_root->commit_root);
2140 fail_sb_buffer:
2141         btrfs_stop_workers(&fs_info->generic_worker);
2142         btrfs_stop_workers(&fs_info->fixup_workers);
2143         btrfs_stop_workers(&fs_info->delalloc_workers);
2144         btrfs_stop_workers(&fs_info->workers);
2145         btrfs_stop_workers(&fs_info->endio_workers);
2146         btrfs_stop_workers(&fs_info->endio_meta_workers);
2147         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2148         btrfs_stop_workers(&fs_info->endio_write_workers);
2149         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2150         btrfs_stop_workers(&fs_info->submit_workers);
2151         btrfs_stop_workers(&fs_info->delayed_workers);
2152         btrfs_stop_workers(&fs_info->caching_workers);
2153 fail_alloc:
2154 fail_iput:
2155         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2156         iput(fs_info->btree_inode);
2157
2158         btrfs_close_devices(fs_info->fs_devices);
2159         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2160 fail_bdi:
2161         bdi_destroy(&fs_info->bdi);
2162 fail_srcu:
2163         cleanup_srcu_struct(&fs_info->subvol_srcu);
2164 fail:
2165         free_fs_info(fs_info);
2166         return ERR_PTR(err);
2167 }
2168
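/*
 * End-io handler for the synchronous super block writes below: record
 * whether the buffer made it to disk, then unlock it and drop the extra
 * reference taken at submit time.
 */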
2169 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2170 {
2171         char b[BDEVNAME_SIZE];
2172
2173         if (uptodate) {
2174                 set_buffer_uptodate(bh);
2175         } else {
2176                 printk_ratelimited(KERN_WARNING "lost page write due to "
2177                                         "I/O error on %s\n",
2178                                        bdevname(bh->b_bdev, b));
2179                 /* note, we don't set_buffer_write_io_error because we have
2180                  * our own ways of dealing with the IO errors
2181                  */
2182                 clear_buffer_uptodate(bh);
2183         }
2184         unlock_buffer(bh);
2185         put_bh(bh);
2186 }
2187
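/*
 * Read the primary super block copy from a device and return the
 * buffer_head holding the copy with the highest generation (only the
 * first mirror is scanned, see the comment in the loop below).
 */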
2188 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2189 {
2190         struct buffer_head *bh;
2191         struct buffer_head *latest = NULL;
2192         struct btrfs_super_block *super;
2193         int i;
2194         u64 transid = 0;
2195         u64 bytenr;
2196
2197         /* we would like to check all the supers, but that would make
2198          * a btrfs mount succeed after a mkfs from a different FS.
2199          * So, we need to add a special mount option to scan for
2200          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2201          */
2202         for (i = 0; i < 1; i++) {
2203                 bytenr = btrfs_sb_offset(i);
2204                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2205                         break;
2206                 bh = __bread(bdev, bytenr / 4096, 4096);
2207                 if (!bh)
2208                         continue;
2209
2210                 super = (struct btrfs_super_block *)bh->b_data;
2211                 if (btrfs_super_bytenr(super) != bytenr ||
2212                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2213                             sizeof(super->magic))) {
2214                         brelse(bh);
2215                         continue;
2216                 }
2217
2218                 if (!latest || btrfs_super_generation(super) > transid) {
2219                         brelse(latest);
2220                         latest = bh;
2221                         transid = btrfs_super_generation(super);
2222                 } else {
2223                         brelse(bh);
2224                 }
2225         }
2226         return latest;
2227 }
2228
2229 /*
2230  * this should be called twice, once with wait == 0 and
2231  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2232  * we write are pinned.
2233  *
2234  * They are released when wait == 1 is done.
2235  * max_mirrors must be the same for both runs, and it indicates how
2236  * many supers on this one device should be written.
2237  *
2238  * max_mirrors == 0 means to write them all.
2239  */
2240 static int write_dev_supers(struct btrfs_device *device,
2241                             struct btrfs_super_block *sb,
2242                             int do_barriers, int wait, int max_mirrors)
2243 {
2244         struct buffer_head *bh;
2245         int i;
2246         int ret;
2247         int errors = 0;
2248         u32 crc;
2249         u64 bytenr;
2250         int last_barrier = 0;
2251
2252         if (max_mirrors == 0)
2253                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2254
2255         /* make sure only the last submit_bh does a barrier */
2256         if (do_barriers) {
2257                 for (i = 0; i < max_mirrors; i++) {
2258                         bytenr = btrfs_sb_offset(i);
2259                         if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2260                             device->total_bytes)
2261                                 break;
2262                         last_barrier = i;
2263                 }
2264         }
2265
2266         for (i = 0; i < max_mirrors; i++) {
2267                 bytenr = btrfs_sb_offset(i);
2268                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2269                         break;
2270
2271                 if (wait) {
2272                         bh = __find_get_block(device->bdev, bytenr / 4096,
2273                                               BTRFS_SUPER_INFO_SIZE);
2274                         BUG_ON(!bh);
2275                         wait_on_buffer(bh);
2276                         if (!buffer_uptodate(bh))
2277                                 errors++;
2278
2279                         /* drop our reference */
2280                         brelse(bh);
2281
2282                         /* drop the reference from the wait == 0 run */
2283                         brelse(bh);
2284                         continue;
2285                 } else {
2286                         btrfs_set_super_bytenr(sb, bytenr);
2287
2288                         crc = ~(u32)0;
2289                         crc = btrfs_csum_data(NULL, (char *)sb +
2290                                               BTRFS_CSUM_SIZE, crc,
2291                                               BTRFS_SUPER_INFO_SIZE -
2292                                               BTRFS_CSUM_SIZE);
2293                         btrfs_csum_final(crc, sb->csum);
2294
2295                         /*
2296                          * one reference for us, and we leave it for the
2297                          * caller
2298                          */
2299                         bh = __getblk(device->bdev, bytenr / 4096,
2300                                       BTRFS_SUPER_INFO_SIZE);
2301                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2302
2303                         /* one reference for submit_bh */
2304                         get_bh(bh);
2305
2306                         set_buffer_uptodate(bh);
2307                         lock_buffer(bh);
2308                         bh->b_end_io = btrfs_end_buffer_write_sync;
2309                 }
2310
2311                 if (i == last_barrier && do_barriers)
2312                         ret = submit_bh(WRITE_FLUSH_FUA, bh);
2313                 else
2314                         ret = submit_bh(WRITE_SYNC, bh);
2315
2316                 if (ret)
2317                         errors++;
2318         }
2319         return errors < i ? 0 : -1;
2320 }
2321
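/*
 * Write the super block to every writable device that carries fs
 * metadata.  This makes two passes through write_dev_supers(): one to
 * submit all the buffers (wait == 0) and one to wait for them
 * (wait == 1), tolerating up to num_devices - 1 failed devices.
 */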
2322 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2323 {
2324         struct list_head *head;
2325         struct btrfs_device *dev;
2326         struct btrfs_super_block *sb;
2327         struct btrfs_dev_item *dev_item;
2328         int ret;
2329         int do_barriers;
2330         int max_errors;
2331         int total_errors = 0;
2332         u64 flags;
2333
2334         max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
2335         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2336
2337         sb = root->fs_info->super_for_commit;
2338         dev_item = &sb->dev_item;
2339
2340         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2341         head = &root->fs_info->fs_devices->devices;
2342         list_for_each_entry_rcu(dev, head, dev_list) {
2343                 if (!dev->bdev) {
2344                         total_errors++;
2345                         continue;
2346                 }
2347                 if (!dev->in_fs_metadata || !dev->writeable)
2348                         continue;
2349
2350                 btrfs_set_stack_device_generation(dev_item, 0);
2351                 btrfs_set_stack_device_type(dev_item, dev->type);
2352                 btrfs_set_stack_device_id(dev_item, dev->devid);
2353                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2354                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2355                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2356                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2357                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2358                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2359                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2360
2361                 flags = btrfs_super_flags(sb);
2362                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2363
2364                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2365                 if (ret)
2366                         total_errors++;
2367         }
2368         if (total_errors > max_errors) {
2369                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2370                        total_errors);
2371                 BUG();
2372         }
2373
2374         total_errors = 0;
2375         list_for_each_entry_rcu(dev, head, dev_list) {
2376                 if (!dev->bdev)
2377                         continue;
2378                 if (!dev->in_fs_metadata || !dev->writeable)
2379                         continue;
2380
2381                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2382                 if (ret)
2383                         total_errors++;
2384         }
2385         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2386         if (total_errors > max_errors) {
2387                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2388                        total_errors);
2389                 BUG();
2390         }
2391         return 0;
2392 }
2393
2394 int write_ctree_super(struct btrfs_trans_handle *trans,
2395                       struct btrfs_root *root, int max_mirrors)
2396 {
2397         int ret;
2398
2399         ret = write_all_supers(root, max_mirrors);
2400         return ret;
2401 }
2402
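/*
 * Remove a subvolume root from the fs_roots radix tree and free it,
 * waiting for subvol_srcu readers first when the root is fully deleted,
 * then dropping its free-ino caches and memory.
 */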
2403 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2404 {
2405         spin_lock(&fs_info->fs_roots_radix_lock);
2406         radix_tree_delete(&fs_info->fs_roots_radix,
2407                           (unsigned long)root->root_key.objectid);
2408         spin_unlock(&fs_info->fs_roots_radix_lock);
2409
2410         if (btrfs_root_refs(&root->root_item) == 0)
2411                 synchronize_srcu(&fs_info->subvol_srcu);
2412
2413         __btrfs_remove_free_space_cache(root->free_ino_pinned);
2414         __btrfs_remove_free_space_cache(root->free_ino_ctl);
2415         free_fs_root(root);
2416         return 0;
2417 }
2418
2419 static void free_fs_root(struct btrfs_root *root)
2420 {
2421         iput(root->cache_inode);
2422         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2423         if (root->anon_dev)
2424                 free_anon_bdev(root->anon_dev);
2425         free_extent_buffer(root->node);
2426         free_extent_buffer(root->commit_root);
2427         kfree(root->free_ino_ctl);
2428         kfree(root->free_ino_pinned);
2429         kfree(root->name);
2430         kfree(root);
2431 }
2432
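/*
 * Free every fs root still tracked at unmount: first the dead roots
 * list, then whatever is left in the fs_roots radix tree.
 */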
2433 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2434 {
2435         int ret;
2436         struct btrfs_root *gang[8];
2437         int i;
2438
2439         while (!list_empty(&fs_info->dead_roots)) {
2440                 gang[0] = list_entry(fs_info->dead_roots.next,
2441                                      struct btrfs_root, root_list);
2442                 list_del(&gang[0]->root_list);
2443
2444                 if (gang[0]->in_radix) {
2445                         btrfs_free_fs_root(fs_info, gang[0]);
2446                 } else {
2447                         free_extent_buffer(gang[0]->node);
2448                         free_extent_buffer(gang[0]->commit_root);
2449                         kfree(gang[0]);
2450                 }
2451         }
2452
2453         while (1) {
2454                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2455                                              (void **)gang, 0,
2456                                              ARRAY_SIZE(gang));
2457                 if (!ret)
2458                         break;
2459                 for (i = 0; i < ret; i++)
2460                         btrfs_free_fs_root(fs_info, gang[i]);
2461         }
2462         return 0;
2463 }
2464
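/*
 * Walk every fs root in the radix tree and run orphan cleanup on it;
 * called from open_ctree() for read-write mounts.
 */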
2465 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2466 {
2467         u64 root_objectid = 0;
2468         struct btrfs_root *gang[8];
2469         int i;
2470         int ret;
2471
2472         while (1) {
2473                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2474                                              (void **)gang, root_objectid,
2475                                              ARRAY_SIZE(gang));
2476                 if (!ret)
2477                         break;
2478
2479                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2480                 for (i = 0; i < ret; i++) {
2481                         int err;
2482
2483                         root_objectid = gang[i]->root_key.objectid;
2484                         err = btrfs_orphan_cleanup(gang[i]);
2485                         if (err)
2486                                 return err;
2487                 }
2488                 root_objectid++;
2489         }
2490         return 0;
2491 }
2492
2493 int btrfs_commit_super(struct btrfs_root *root)
2494 {
2495         struct btrfs_trans_handle *trans;
2496         int ret;
2497
2498         mutex_lock(&root->fs_info->cleaner_mutex);
2499         btrfs_run_delayed_iputs(root);
2500         btrfs_clean_old_snapshots(root);
2501         mutex_unlock(&root->fs_info->cleaner_mutex);
2502
2503         /* wait until ongoing cleanup work is done */
2504         down_write(&root->fs_info->cleanup_work_sem);
2505         up_write(&root->fs_info->cleanup_work_sem);
2506
2507         trans = btrfs_join_transaction(root);
2508         if (IS_ERR(trans))
2509                 return PTR_ERR(trans);
2510         ret = btrfs_commit_transaction(trans, root);
2511         BUG_ON(ret);
2512         /* run commit again to drop the original snapshot */
2513         trans = btrfs_join_transaction(root);
2514         if (IS_ERR(trans))
2515                 return PTR_ERR(trans);
2516         btrfs_commit_transaction(trans, root);
2517         ret = btrfs_write_and_wait_transaction(NULL, root);
2518         BUG_ON(ret);
2519
2520         ret = write_ctree_super(NULL, root, 0);
2521         return ret;
2522 }
2523
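/*
 * Unmount-time teardown: cancel scrub, wait for running defrag, commit
 * the final transaction (or do the error-state commit), stop the
 * kthreads and worker pools, free the tree roots and block groups, and
 * release the devices, bdi and fs_info.
 */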
2524 int close_ctree(struct btrfs_root *root)
2525 {
2526         struct btrfs_fs_info *fs_info = root->fs_info;
2527         int ret;
2528
2529         fs_info->closing = 1;
2530         smp_mb();
2531
2532         btrfs_scrub_cancel(root);
2533
2534         /* wait for any defraggers to finish */
2535         wait_event(fs_info->transaction_wait,
2536                    (atomic_read(&fs_info->defrag_running) == 0));
2537
2538         /* clear out the rbtree of defraggable inodes */
2539         btrfs_run_defrag_inodes(root->fs_info);
2540
2541         /*
2542          * There are two situations in which a broken btrfs can end up
2543          * flipped read-only:
2544          *
2545          * 1. btrfs flips read-only somewhere else before
2546          * btrfs_commit_super; sb->s_flags has the MS_RDONLY flag set, so
2547          * btrfs skips writing the sb here to keep the ERROR state on disk.
2548          *
2549          * 2. btrfs flips read-only inside btrfs_commit_super itself; in
2550          * that case btrfs cannot write the sb via btrfs_commit_super, and
2551          * since fs_state has the BTRFS_SUPER_FLAG_ERROR flag set, btrfs
2552          * cleans up all FS resources first and writes the sb afterwards.
2553          */
2554         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2555                 ret = btrfs_commit_super(root);
2556                 if (ret)
2557                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2558         }
2559
2560         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
2561                 ret = btrfs_error_commit_super(root);
2562                 if (ret)
2563                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2564         }
2565
2566         btrfs_put_block_group_cache(fs_info);
2567
2568         kthread_stop(root->fs_info->transaction_kthread);
2569         kthread_stop(root->fs_info->cleaner_kthread);
2570
2571         fs_info->closing = 2;
2572         smp_mb();
2573
2574         if (fs_info->delalloc_bytes) {
2575                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2576                        (unsigned long long)fs_info->delalloc_bytes);
2577         }
2578         if (fs_info->total_ref_cache_size) {
2579                 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
2580                        (unsigned long long)fs_info->total_ref_cache_size);
2581         }
2582
2583         free_extent_buffer(fs_info->extent_root->node);
2584         free_extent_buffer(fs_info->extent_root->commit_root);
2585         free_extent_buffer(fs_info->tree_root->node);
2586         free_extent_buffer(fs_info->tree_root->commit_root);
2587         free_extent_buffer(root->fs_info->chunk_root->node);
2588         free_extent_buffer(root->fs_info->chunk_root->commit_root);
2589         free_extent_buffer(root->fs_info->dev_root->node);
2590         free_extent_buffer(root->fs_info->dev_root->commit_root);
2591         free_extent_buffer(root->fs_info->csum_root->node);
2592         free_extent_buffer(root->fs_info->csum_root->commit_root);
2593
2594         btrfs_free_block_groups(root->fs_info);
2595
2596         del_fs_roots(fs_info);
2597
2598         iput(fs_info->btree_inode);
2599
2600         btrfs_stop_workers(&fs_info->generic_worker);
2601         btrfs_stop_workers(&fs_info->fixup_workers);
2602         btrfs_stop_workers(&fs_info->delalloc_workers);
2603         btrfs_stop_workers(&fs_info->workers);
2604         btrfs_stop_workers(&fs_info->endio_workers);
2605         btrfs_stop_workers(&fs_info->endio_meta_workers);
2606         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2607         btrfs_stop_workers(&fs_info->endio_write_workers);
2608         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2609         btrfs_stop_workers(&fs_info->submit_workers);
2610         btrfs_stop_workers(&fs_info->delayed_workers);
2611         btrfs_stop_workers(&fs_info->caching_workers);
2612
2613         btrfs_close_devices(fs_info->fs_devices);
2614         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2615
2616         bdi_destroy(&fs_info->bdi);
2617         cleanup_srcu_struct(&fs_info->subvol_srcu);
2618
2619         free_fs_info(fs_info);
2620
2621         return 0;
2622 }
2623
2624 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2625 {
2626         int ret;
2627         struct inode *btree_inode = buf->first_page->mapping->host;
2628
2629         ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2630                                      NULL);
2631         if (!ret)
2632                 return ret;
2633
2634         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2635                                     parent_transid);
2636         return !ret;
2637 }
2638
2639 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2640 {
2641         struct inode *btree_inode = buf->first_page->mapping->host;
2642         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2643                                           buf);
2644 }
2645
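/*
 * Mark a metadata extent buffer dirty.  The buffer is expected to be
 * tree locked and to belong to the currently running generation;
 * newly dirtied buffers are accounted in dirty_metadata_bytes.
 */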
2646 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2647 {
2648         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2649         u64 transid = btrfs_header_generation(buf);
2650         struct inode *btree_inode = root->fs_info->btree_inode;
2651         int was_dirty;
2652
2653         btrfs_assert_tree_locked(buf);
2654         if (transid != root->fs_info->generation) {
2655                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2656                        "found %llu running %llu\n",
2657                         (unsigned long long)buf->start,
2658                         (unsigned long long)transid,
2659                         (unsigned long long)root->fs_info->generation);
2660                 WARN_ON(1);
2661         }
2662         was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2663                                             buf);
2664         if (!was_dirty) {
2665                 spin_lock(&root->fs_info->delalloc_lock);
2666                 root->fs_info->dirty_metadata_bytes += buf->len;
2667                 spin_unlock(&root->fs_info->delalloc_lock);
2668         }
2669 }
2670
2671 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2672 {
2673         /*
2674          * looks as though older kernels can get into trouble with
2675          * this code; they end up stuck in balance_dirty_pages forever
2676          */
2677         u64 num_dirty;
2678         unsigned long thresh = 32 * 1024 * 1024;
2679
2680         if (current->flags & PF_MEMALLOC)
2681                 return;
2682
2683         btrfs_balance_delayed_items(root);
2684
2685         num_dirty = root->fs_info->dirty_metadata_bytes;
2686
2687         if (num_dirty > thresh) {
2688                 balance_dirty_pages_ratelimited_nr(
2689                                    root->fs_info->btree_inode->i_mapping, 1);
2690         }
2691         return;
2692 }
2693
2694 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2695 {
2696         /*
2697          * looks as though older kernels can get into trouble with
2698          * this code, they end up stuck in balance_dirty_pages forever
2699          */
2700         u64 num_dirty;
2701         unsigned long thresh = 32 * 1024 * 1024;
2702
2703         if (current->flags & PF_MEMALLOC)
2704                 return;
2705
2706         num_dirty = root->fs_info->dirty_metadata_bytes;
2707
2708         if (num_dirty > thresh) {
2709                 balance_dirty_pages_ratelimited_nr(
2710                                    root->fs_info->btree_inode->i_mapping, 1);
2711         }
2712         return;
2713 }
2714
2715 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2716 {
2717         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2718         int ret;
2719         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2720         if (ret == 0)
2721                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2722         return ret;
2723 }
2724
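/*
 * Lock a btree page for writeback: take the tree write lock on the
 * extent buffer backing the page (flushing queued work first if the
 * lock is contended), mark it written, drop its dirty accounting and
 * finally lock the page itself.
 */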
2725 static int btree_lock_page_hook(struct page *page, void *data,
2726                                 void (*flush_fn)(void *))
2727 {
2728         struct inode *inode = page->mapping->host;
2729         struct btrfs_root *root = BTRFS_I(inode)->root;
2730         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2731         struct extent_buffer *eb;
2732         unsigned long len;
2733         u64 bytenr = page_offset(page);
2734
2735         if (page->private == EXTENT_PAGE_PRIVATE)
2736                 goto out;
2737
2738         len = page->private >> 2;
2739         eb = find_extent_buffer(io_tree, bytenr, len);
2740         if (!eb)
2741                 goto out;
2742
2743         if (!btrfs_try_tree_write_lock(eb)) {
2744                 flush_fn(data);
2745                 btrfs_tree_lock(eb);
2746         }
2747         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2748
2749         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2750                 spin_lock(&root->fs_info->delalloc_lock);
2751                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
2752                         root->fs_info->dirty_metadata_bytes -= eb->len;
2753                 else
2754                         WARN_ON(1);
2755                 spin_unlock(&root->fs_info->delalloc_lock);
2756         }
2757
2758         btrfs_tree_unlock(eb);
2759         free_extent_buffer(eb);
2760 out:
2761         if (!trylock_page(page)) {
2762                 flush_fn(data);
2763                 lock_page(page);
2764         }
2765         return 0;
2766 }
2767
2768 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
2769                               int read_only)
2770 {
2771         if (read_only)
2772                 return;
2773
2774         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2775                 printk(KERN_WARNING "warning: mount fs with errors, "
2776                        "running btrfsck is recommended\n");
2777 }
2778
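/*
 * Commit path used when the filesystem is already in an error state:
 * clean up what we can via btrfs_cleanup_transaction() and then write
 * the super blocks directly instead of committing a transaction.
 */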
2779 int btrfs_error_commit_super(struct btrfs_root *root)
2780 {
2781         int ret;
2782
2783         mutex_lock(&root->fs_info->cleaner_mutex);
2784         btrfs_run_delayed_iputs(root);
2785         mutex_unlock(&root->fs_info->cleaner_mutex);
2786
2787         down_write(&root->fs_info->cleanup_work_sem);
2788         up_write(&root->fs_info->cleanup_work_sem);
2789
2790         /* clean up the FS state via the transaction cleanup path */
2791         btrfs_cleanup_transaction(root);
2792
2793         ret = write_ctree_super(NULL, root, 0);
2794
2795         return ret;
2796 }
2797
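/*
 * Drop every inode still queued on the fs-wide ordered_operations list
 * and invalidate the inodes of the affected roots.
 */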
2798 static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
2799 {
2800         struct btrfs_inode *btrfs_inode;
2801         struct list_head splice;
2802
2803         INIT_LIST_HEAD(&splice);
2804
2805         mutex_lock(&root->fs_info->ordered_operations_mutex);
2806         spin_lock(&root->fs_info->ordered_extent_lock);
2807
2808         list_splice_init(&root->fs_info->ordered_operations, &splice);
2809         while (!list_empty(&splice)) {
2810                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2811                                          ordered_operations);
2812
2813                 list_del_init(&btrfs_inode->ordered_operations);
2814
2815                 btrfs_invalidate_inodes(btrfs_inode->root);
2816         }
2817
2818         spin_unlock(&root->fs_info->ordered_extent_lock);
2819         mutex_unlock(&root->fs_info->ordered_operations_mutex);
2820
2821         return 0;
2822 }
2823
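/*
 * Tear down all ordered extents that never completed: unlink each one
 * from the fs-wide list, drop the inode reference it may still hold and
 * force its refcount down so it gets freed.
 */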
2824 static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
2825 {
2826         struct list_head splice;
2827         struct btrfs_ordered_extent *ordered;
2828         struct inode *inode;
2829
2830         INIT_LIST_HEAD(&splice);
2831
2832         spin_lock(&root->fs_info->ordered_extent_lock);
2833
2834         list_splice_init(&root->fs_info->ordered_extents, &splice);
2835         while (!list_empty(&splice)) {
2836                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
2837                                      root_extent_list);
2838
2839                 list_del_init(&ordered->root_extent_list);
2840                 atomic_inc(&ordered->refs);
2841
2842                 /* the inode may be getting freed (in the sys_unlink path). */
2843                 inode = igrab(ordered->inode);
2844
2845                 spin_unlock(&root->fs_info->ordered_extent_lock);
2846                 if (inode)
2847                         iput(inode);
2848
2849                 atomic_set(&ordered->refs, 1);
2850                 btrfs_put_ordered_extent(ordered);
2851
2852                 spin_lock(&root->fs_info->ordered_extent_lock);
2853         }
2854
2855         spin_unlock(&root->fs_info->ordered_extent_lock);
2856
2857         return 0;
2858 }
2859
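/*
 * Throw away every delayed ref still recorded against the transaction
 * being torn down: remove the nodes from the rbtree, release the head
 * state and drop the references without processing them.
 */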
2860 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2861                                       struct btrfs_root *root)
2862 {
2863         struct rb_node *node;
2864         struct btrfs_delayed_ref_root *delayed_refs;
2865         struct btrfs_delayed_ref_node *ref;
2866         int ret = 0;
2867
2868         delayed_refs = &trans->delayed_refs;
2869
2870         spin_lock(&delayed_refs->lock);
2871         if (delayed_refs->num_entries == 0) {
2872                 spin_unlock(&delayed_refs->lock);
2873                 printk(KERN_INFO "delayed_refs has no entries\n");
2874                 return ret;
2875         }
2876
2877         node = rb_first(&delayed_refs->root);
2878         while (node) {
2879                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2880                 node = rb_next(node);
2881
2882                 ref->in_tree = 0;
2883                 rb_erase(&ref->rb_node, &delayed_refs->root);
2884                 delayed_refs->num_entries--;
2885
2886                 atomic_set(&ref->refs, 1);
2887                 if (btrfs_delayed_ref_is_head(ref)) {
2888                         struct btrfs_delayed_ref_head *head;
2889
2890                         head = btrfs_delayed_node_to_head(ref);
2891                         mutex_lock(&head->mutex);
2892                         kfree(head->extent_op);
2893                         delayed_refs->num_heads--;
2894                         if (list_empty(&head->cluster))
2895                                 delayed_refs->num_heads_ready--;
2896                         list_del_init(&head->cluster);
2897                         mutex_unlock(&head->mutex);
2898                 }
2899
2900                 spin_unlock(&delayed_refs->lock);
2901                 btrfs_put_delayed_ref(ref);
2902
2903                 cond_resched();
2904                 spin_lock(&delayed_refs->lock);
2905         }
2906
2907         spin_unlock(&delayed_refs->lock);
2908
2909         return ret;
2910 }
2911
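/*
 * Free the pending snapshots attached to the transaction being torn down
 * without ever creating them.
 */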
2912 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
2913 {
2914         struct btrfs_pending_snapshot *snapshot;
2915         struct list_head splice;
2916
2917         INIT_LIST_HEAD(&splice);
2918
2919         list_splice_init(&t->pending_snapshots, &splice);
2920
2921         while (!list_empty(&splice)) {
2922                 snapshot = list_entry(splice.next,
2923                                       struct btrfs_pending_snapshot,
2924                                       list);
2925
2926                 list_del_init(&snapshot->list);
2927
2928                 kfree(snapshot);
2929         }
2930
2931         return 0;
2932 }
2933
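/*
 * Empty the fs-wide delalloc list and invalidate the inodes of the roots
 * that still had outstanding delalloc.
 */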
2934 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2935 {
2936         struct btrfs_inode *btrfs_inode;
2937         struct list_head splice;
2938
2939         INIT_LIST_HEAD(&splice);
2940
2941         spin_lock(&root->fs_info->delalloc_lock);
2942         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2943
2944         while (!list_empty(&splice)) {
2945                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2946                                     delalloc_inodes);
2947
2948                 list_del_init(&btrfs_inode->delalloc_inodes);
2949
2950                 btrfs_invalidate_inodes(btrfs_inode->root);
2951         }
2952
2953         spin_unlock(&root->fs_info->delalloc_lock);
2954
2955         return 0;
2956 }
2957
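/*
 * Walk the ranges tagged with @mark in @dirty_pages and discard them:
 * clear the extent bits, drop the dirty state on the underlying btree
 * pages and invalidate them so nothing is ever written back.
 */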
2958 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
2959                                         struct extent_io_tree *dirty_pages,
2960                                         int mark)
2961 {
2962         int ret;
2963         struct page *page;
2964         struct inode *btree_inode = root->fs_info->btree_inode;
2965         struct extent_buffer *eb;
2966         u64 start = 0;
2967         u64 end;
2968         u64 offset;
2969         unsigned long index;
2970
2971         while (1) {
2972                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
2973                                             mark);
2974                 if (ret)
2975                         break;
2976
2977                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
2978                 while (start <= end) {
2979                         index = start >> PAGE_CACHE_SHIFT;
2980                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
2981                         page = find_get_page(btree_inode->i_mapping, index);
2982                         if (!page)
2983                                 continue;
2984                         offset = page_offset(page);
2985
2986                         spin_lock(&dirty_pages->buffer_lock);
2987                         eb = radix_tree_lookup(
2988                              &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
2989                                                offset >> PAGE_CACHE_SHIFT);
2990                         spin_unlock(&dirty_pages->buffer_lock);
2991                         if (eb) {
2992                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
2993                                                          &eb->bflags);
2994                                 atomic_set(&eb->refs, 1);
2995                         }
2996                         if (PageWriteback(page))
2997                                 end_page_writeback(page);
2998
2999                         lock_page(page);
3000                         if (PageDirty(page)) {
3001                                 clear_page_dirty_for_io(page);
3002                                 spin_lock_irq(&page->mapping->tree_lock);
3003                                 radix_tree_tag_clear(&page->mapping->page_tree,
3004                                                         page_index(page),
3005                                                         PAGECACHE_TAG_DIRTY);
3006                                 spin_unlock_irq(&page->mapping->tree_lock);
3007                         }
3008
3009                         page->mapping->a_ops->invalidatepage(page, 0);
3010                         unlock_page(page);
3011                 }
3012         }
3013
3014         return ret;
3015 }
3016
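/*
 * Release every range still marked dirty in the pinned extent tree,
 * optionally issuing discards, and hand the space back to the allocator.
 */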
3017 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3018                                        struct extent_io_tree *pinned_extents)
3019 {
3020         struct extent_io_tree *unpin;
3021         u64 start;
3022         u64 end;
3023         int ret;
3024
3025         unpin = pinned_extents;
3026         while (1) {
3027                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3028                                             EXTENT_DIRTY);
3029                 if (ret)
3030                         break;
3031
3032                 /* discard the range if the discard mount option is set */
3033                 if (btrfs_test_opt(root, DISCARD))
3034                         ret = btrfs_error_discard_extent(root, start,
3035                                                          end + 1 - start,
3036                                                          NULL);
3037
3038                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3039                 btrfs_error_unpin_extent_range(root, start, end);
3040                 cond_resched();
3041         }
3042
3043         return 0;
3044 }
3045
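/*
 * Last-resort teardown of all transactions still on the fs-wide list.
 * For each one, destroy its ordered and delayed state, wake anyone
 * waiting on the commit, throw away its dirty and pinned extents and
 * finally free the transaction itself.
 */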
3046 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3047 {
3048         struct btrfs_transaction *t;
3049         LIST_HEAD(list);
3050
3051         WARN_ON(1);
3052
3053         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3054
3055         spin_lock(&root->fs_info->trans_lock);
3056         list_splice_init(&root->fs_info->trans_list, &list);
3057         root->fs_info->trans_no_join = 1;
3058         spin_unlock(&root->fs_info->trans_lock);
3059
3060         while (!list_empty(&list)) {
3061                 t = list_entry(list.next, struct btrfs_transaction, list);
3062                 if (!t)
3063                         break;
3064
3065                 btrfs_destroy_ordered_operations(root);
3066
3067                 btrfs_destroy_ordered_extents(root);
3068
3069                 btrfs_destroy_delayed_refs(t, root);
3070
3071                 btrfs_block_rsv_release(root,
3072                                         &root->fs_info->trans_block_rsv,
3073                                         t->dirty_pages.dirty_bytes);
3074
3075                 /* FIXME: cleanup wait for commit */
3076                 t->in_commit = 1;
3077                 t->blocked = 1;
3078                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3079                         wake_up(&root->fs_info->transaction_blocked_wait);
3080
3081                 t->blocked = 0;
3082                 if (waitqueue_active(&root->fs_info->transaction_wait))
3083                         wake_up(&root->fs_info->transaction_wait);
3084
3085                 t->commit_done = 1;
3086                 if (waitqueue_active(&t->commit_wait))
3087                         wake_up(&t->commit_wait);
3088
3089                 btrfs_destroy_pending_snapshots(t);
3090
3091                 btrfs_destroy_delalloc_inodes(root);
3092
3093                 spin_lock(&root->fs_info->trans_lock);
3094                 root->fs_info->running_transaction = NULL;
3095                 spin_unlock(&root->fs_info->trans_lock);
3096
3097                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3098                                              EXTENT_DIRTY);
3099
3100                 btrfs_destroy_pinned_extent(root,
3101                                             root->fs_info->pinned_extents);
3102
3103                 atomic_set(&t->use_count, 0);
3104                 list_del_init(&t->list);
3105                 memset(t, 0, sizeof(*t));
3106                 kmem_cache_free(btrfs_transaction_cachep, t);
3107         }
3108
3109         spin_lock(&root->fs_info->trans_lock);
3110         root->fs_info->trans_no_join = 0;
3111         spin_unlock(&root->fs_info->trans_lock);
3112         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3113
3114         return 0;
3115 }
3116
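/*
 * extent_io callbacks used for IO on the btree inode.
 */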
3117 static struct extent_io_ops btree_extent_io_ops = {
3118         .write_cache_pages_lock_hook = btree_lock_page_hook,
3119         .readpage_end_io_hook = btree_readpage_end_io_hook,
3120         .submit_bio_hook = btree_submit_bio_hook,
3121         /* note we're sharing with inode.c for the merge bio hook */
3122         .merge_bio_hook = btrfs_merge_bio_hook,
3123 };