btrfs: add READAHEAD extent buffer flag
[karo-tx-linux.git] / fs / btrfs / disk-io.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/crc32c.h>
30 #include <linux/slab.h>
31 #include <linux/migrate.h>
32 #include <linux/ratelimit.h>
33 #include <asm/unaligned.h>
34 #include "compat.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "tree-log.h"
44 #include "free-space-cache.h"
45 #include "inode-map.h"
46
47 static struct extent_io_ops btree_extent_io_ops;
48 static void end_workqueue_fn(struct btrfs_work *work);
49 static void free_fs_root(struct btrfs_root *root);
50 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
51                                     int read_only);
52 static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
53 static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
54 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
55                                       struct btrfs_root *root);
56 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
57 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
58 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
59                                         struct extent_io_tree *dirty_pages,
60                                         int mark);
61 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
62                                        struct extent_io_tree *pinned_extents);
63 static int btrfs_cleanup_transaction(struct btrfs_root *root);
64
65 /*
66  * end_io_wq structs are used to do processing in task context when an IO is
67  * complete.  This is used during reads to verify checksums, and it is used
68  * by writes to insert metadata for new file extents after IO is complete.
69  */
70 struct end_io_wq {
71         struct bio *bio;
72         bio_end_io_t *end_io;
73         void *private;
74         struct btrfs_fs_info *info;
75         int error;
76         int metadata;
77         struct list_head list;
78         struct btrfs_work work;
79 };
80
81 /*
82  * async submit bios are used to offload expensive checksumming
83  * onto the worker threads.  They checksum file and metadata bios
84  * just before they are sent down the IO stack.
85  */
86 struct async_submit_bio {
87         struct inode *inode;
88         struct bio *bio;
89         struct list_head list;
90         extent_submit_bio_hook_t *submit_bio_start;
91         extent_submit_bio_hook_t *submit_bio_done;
92         int rw;
93         int mirror_num;
94         unsigned long bio_flags;
95         /*
96          * bio_offset is optional, can be used if the pages in the bio
97          * can't tell us where in the file the bio should go
98          */
99         u64 bio_offset;
100         struct btrfs_work work;
101 };
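/*
 * Illustrative sketch of how the async submit path uses these fields (see
 * btrfs_wq_submit_bio below): the checksum pass runs as work.func
 * (run_one_async_start), the real submission as work.ordered_func
 * (run_one_async_done), and the struct is released by work.ordered_free
 * (run_one_async_free), so expensive checksumming runs on the worker pool
 * while bios are still completed in submission order.
 */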
102
103 /*
104  * Lockdep class keys for extent_buffer->lock's in this root.  For a given
105  * eb, the lockdep key is determined by the btrfs_root it belongs to and
106  * the level the eb occupies in the tree.
107  *
108  * Different roots are used for different purposes and may nest inside each
109  * other, so they require separate keysets.  As lockdep keys should be
110  * static, assign keysets according to the purpose of the root as indicated
111  * by btrfs_root->objectid.  This ensures that all special purpose roots
112  * have separate keysets.
113  *
114  * Lock-nesting across peer nodes is always done with the immediate parent
115  * node locked thus preventing deadlock.  As lockdep doesn't know this, use
116  * subclass to avoid triggering lockdep warning in such cases.
117  *
118  * The key is set by the readpage_end_io_hook after the buffer has passed
119  * csum validation but before the pages are unlocked.  It is also set by
120  * btrfs_init_new_buffer on freshly allocated blocks.
121  *
122  * We also add a check to make sure the highest level of the tree is the
123  * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
124  * needs update as well.
125  */
126 #ifdef CONFIG_DEBUG_LOCK_ALLOC
127 # if BTRFS_MAX_LEVEL != 8
128 #  error "BTRFS_MAX_LEVEL changed, update the lockdep keysets below"
129 # endif
130
131 static struct btrfs_lockdep_keyset {
132         u64                     id;             /* root objectid */
133         const char              *name_stem;     /* lock name stem */
134         char                    names[BTRFS_MAX_LEVEL + 1][20];
135         struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
136 } btrfs_lockdep_keysets[] = {
137         { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
138         { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
139         { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
140         { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
141         { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
142         { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
143         { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
144         { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
145         { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
146         { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
147         { .id = 0,                              .name_stem = "tree"     },
148 };
149
150 void __init btrfs_init_lockdep(void)
151 {
152         int i, j;
153
154         /* initialize lockdep class names */
155         for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
156                 struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];
157
158                 for (j = 0; j < ARRAY_SIZE(ks->names); j++)
159                         snprintf(ks->names[j], sizeof(ks->names[j]),
160                                  "btrfs-%s-%02d", ks->name_stem, j);
161         }
162 }
163
164 void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
165                                     int level)
166 {
167         struct btrfs_lockdep_keyset *ks;
168
169         BUG_ON(level >= ARRAY_SIZE(ks->keys));
170
171         /* find the matching keyset, id 0 is the default entry */
172         for (ks = btrfs_lockdep_keysets; ks->id; ks++)
173                 if (ks->id == objectid)
174                         break;
175
176         lockdep_set_class_and_name(&eb->lock,
177                                    &ks->keys[level], ks->names[level]);
178 }
179
180 #endif
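/*
 * Example of the lockdep key assignment described above: once a tree block
 * passes csum verification, btree_readpage_end_io_hook() calls
 *
 *	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, level);
 *
 * so an extent tree block at level 2 gets the "btrfs-extent-02" lock class,
 * distinct from fs tree or log tree blocks at the same level.
 */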
181
182 /*
183  * extents on the btree inode are pretty simple, there's one extent
184  * that covers the entire device
185  */
186 static struct extent_map *btree_get_extent(struct inode *inode,
187                 struct page *page, size_t pg_offset, u64 start, u64 len,
188                 int create)
189 {
190         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
191         struct extent_map *em;
192         int ret;
193
194         read_lock(&em_tree->lock);
195         em = lookup_extent_mapping(em_tree, start, len);
196         if (em) {
197                 em->bdev =
198                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
199                 read_unlock(&em_tree->lock);
200                 goto out;
201         }
202         read_unlock(&em_tree->lock);
203
204         em = alloc_extent_map();
205         if (!em) {
206                 em = ERR_PTR(-ENOMEM);
207                 goto out;
208         }
209         em->start = 0;
210         em->len = (u64)-1;
211         em->block_len = (u64)-1;
212         em->block_start = 0;
213         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
214
215         write_lock(&em_tree->lock);
216         ret = add_extent_mapping(em_tree, em);
217         if (ret == -EEXIST) {
218                 u64 failed_start = em->start;
219                 u64 failed_len = em->len;
220
221                 free_extent_map(em);
222                 em = lookup_extent_mapping(em_tree, start, len);
223                 if (em) {
224                         ret = 0;
225                 } else {
226                         em = lookup_extent_mapping(em_tree, failed_start,
227                                                    failed_len);
228                         ret = -EIO;
229                 }
230         } else if (ret) {
231                 free_extent_map(em);
232                 em = NULL;
233         }
234         write_unlock(&em_tree->lock);
235
236         if (ret)
237                 em = ERR_PTR(ret);
238 out:
239         return em;
240 }
241
242 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
243 {
244         return crc32c(seed, data, len);
245 }
246
247 void btrfs_csum_final(u32 crc, char *result)
248 {
249         put_unaligned_le32(~crc, result);
250 }
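/*
 * The two helpers above are used as a pair: seed the crc with ~(u32)0, feed
 * it data through btrfs_csum_data(), then let btrfs_csum_final() invert the
 * result and store it as little-endian bytes, e.g.
 *
 *	u32 crc = ~(u32)0;
 *	crc = btrfs_csum_data(root, kaddr, crc, cur_len);
 *	btrfs_csum_final(crc, result);
 *
 * which is what csum_tree_block() below does for each mapped chunk of a
 * tree block.
 */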
251
252 /*
253  * compute the csum for a btree block, and either verify it or write it
254  * into the csum field of the block.
255  */
256 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
257                            int verify)
258 {
259         u16 csum_size =
260                 btrfs_super_csum_size(&root->fs_info->super_copy);
261         char *result = NULL;
262         unsigned long len;
263         unsigned long cur_len;
264         unsigned long offset = BTRFS_CSUM_SIZE;
265         char *kaddr;
266         unsigned long map_start;
267         unsigned long map_len;
268         int err;
269         u32 crc = ~(u32)0;
270         unsigned long inline_result;
271
272         len = buf->len - offset;
273         while (len > 0) {
274                 err = map_private_extent_buffer(buf, offset, 32,
275                                         &kaddr, &map_start, &map_len);
276                 if (err)
277                         return 1;
278                 cur_len = min(len, map_len - (offset - map_start));
279                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
280                                       crc, cur_len);
281                 len -= cur_len;
282                 offset += cur_len;
283         }
284         if (csum_size > sizeof(inline_result)) {
285                 result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
286                 if (!result)
287                         return 1;
288         } else {
289                 result = (char *)&inline_result;
290         }
291
292         btrfs_csum_final(crc, result);
293
294         if (verify) {
295                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
296                         u32 val;
297                         u32 found = 0;
298                         memcpy(&found, result, csum_size);
299
300                         read_extent_buffer(buf, &val, 0, csum_size);
301                         printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
302                                        "failed on %llu wanted %X found %X "
303                                        "level %d\n",
304                                        root->fs_info->sb->s_id,
305                                        (unsigned long long)buf->start, val, found,
306                                        btrfs_header_level(buf));
307                         if (result != (char *)&inline_result)
308                                 kfree(result);
309                         return 1;
310                 }
311         } else {
312                 write_extent_buffer(buf, result, 0, csum_size);
313         }
314         if (result != (char *)&inline_result)
315                 kfree(result);
316         return 0;
317 }
318
319 /*
320  * we can't consider a given block up to date unless the transid of the
321  * block matches the transid in the parent node's pointer.  This is how we
322  * detect blocks that either didn't get written at all or got written
323  * in the wrong place.
324  */
325 static int verify_parent_transid(struct extent_io_tree *io_tree,
326                                  struct extent_buffer *eb, u64 parent_transid)
327 {
328         struct extent_state *cached_state = NULL;
329         int ret;
330
331         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
332                 return 0;
333
334         lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335                          0, &cached_state, GFP_NOFS);
336         if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
337             btrfs_header_generation(eb) == parent_transid) {
338                 ret = 0;
339                 goto out;
340         }
341         printk_ratelimited("parent transid verify failed on %llu wanted %llu "
342                        "found %llu\n",
343                        (unsigned long long)eb->start,
344                        (unsigned long long)parent_transid,
345                        (unsigned long long)btrfs_header_generation(eb));
346         ret = 1;
347         clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
348 out:
349         unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
350                              &cached_state, GFP_NOFS);
351         return ret;
352 }
353
354 /*
355  * helper to read a given tree block, doing retries as required when
356  * the checksums don't match and we have alternate mirrors to try.
357  */
358 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
359                                           struct extent_buffer *eb,
360                                           u64 start, u64 parent_transid)
361 {
362         struct extent_io_tree *io_tree;
363         int ret;
364         int num_copies = 0;
365         int mirror_num = 0;
366
367         clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
368         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
369         while (1) {
370                 ret = read_extent_buffer_pages(io_tree, eb, start,
371                                                WAIT_COMPLETE,
372                                                btree_get_extent, mirror_num);
373                 if (!ret &&
374                     !verify_parent_transid(io_tree, eb, parent_transid))
375                         return ret;
376
377                 /*
378                  * This buffer's crc is fine, but its contents are corrupted, so
379                  * there is no reason to read the other copies, they won't be
380                  * any less wrong.
381                  */
382                 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
383                         return ret;
384
385                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
386                                               eb->start, eb->len);
387                 if (num_copies == 1)
388                         return ret;
389
390                 mirror_num++;
391                 if (mirror_num > num_copies)
392                         return ret;
393         }
394         return -EIO;
395 }
396
397 /*
398  * checksum a dirty tree block before IO.  This has extra checks to make sure
399  * we only fill in the checksum field in the first page of a multi-page block
400  */
401
402 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
403 {
404         struct extent_io_tree *tree;
405         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
406         u64 found_start;
407         unsigned long len;
408         struct extent_buffer *eb;
409         int ret;
410
411         tree = &BTRFS_I(page->mapping->host)->io_tree;
412
413         if (page->private == EXTENT_PAGE_PRIVATE) {
414                 WARN_ON(1);
415                 goto out;
416         }
417         if (!page->private) {
418                 WARN_ON(1);
419                 goto out;
420         }
421         len = page->private >> 2;
422         WARN_ON(len == 0);
423
424         eb = alloc_extent_buffer(tree, start, len, page);
425         if (eb == NULL) {
426                 WARN_ON(1);
427                 goto out;
428         }
429         ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
430                                              btrfs_header_generation(eb));
431         BUG_ON(ret);
432         WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
433
434         found_start = btrfs_header_bytenr(eb);
435         if (found_start != start) {
436                 WARN_ON(1);
437                 goto err;
438         }
439         if (eb->first_page != page) {
440                 WARN_ON(1);
441                 goto err;
442         }
443         if (!PageUptodate(page)) {
444                 WARN_ON(1);
445                 goto err;
446         }
447         csum_tree_block(root, eb, 0);
448 err:
449         free_extent_buffer(eb);
450 out:
451         return 0;
452 }
453
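/*
 * Check that the fsid stored in a tree block header belongs to this
 * filesystem or to one of its seed devices.  Returns 0 on a match and 1 if
 * no fs_devices in the chain carries the fsid.
 */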
454 static int check_tree_block_fsid(struct btrfs_root *root,
455                                  struct extent_buffer *eb)
456 {
457         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
458         u8 fsid[BTRFS_UUID_SIZE];
459         int ret = 1;
460
461         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
462                            BTRFS_FSID_SIZE);
463         while (fs_devices) {
464                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
465                         ret = 0;
466                         break;
467                 }
468                 fs_devices = fs_devices->seed;
469         }
470         return ret;
471 }
472
473 #define CORRUPT(reason, eb, root, slot)                         \
474         printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
475                "root=%llu, slot=%d\n", reason,                  \
476                (unsigned long long)btrfs_header_bytenr(eb),     \
477                (unsigned long long)root->objectid, slot)
478
479 static noinline int check_leaf(struct btrfs_root *root,
480                                struct extent_buffer *leaf)
481 {
482         struct btrfs_key key;
483         struct btrfs_key leaf_key;
484         u32 nritems = btrfs_header_nritems(leaf);
485         int slot;
486
487         if (nritems == 0)
488                 return 0;
489
490         /* Check the 0 item */
491         if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
492             BTRFS_LEAF_DATA_SIZE(root)) {
493                 CORRUPT("invalid item offset size pair", leaf, root, 0);
494                 return -EIO;
495         }
496
497         /*
498          * Check to make sure each item's keys are in the correct order and their
499          * offsets make sense.  We only have to loop through nritems-1 because
500          * we check the current slot against the next slot, which verifies that the
501          * next slot's offset+size makes sense and that the current slot's
502          * offset is correct.
503          */
504         for (slot = 0; slot < nritems - 1; slot++) {
505                 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
506                 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
507
508                 /* Make sure the keys are in the right order */
509                 if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
510                         CORRUPT("bad key order", leaf, root, slot);
511                         return -EIO;
512                 }
513
514                 /*
515                  * Make sure the offset and ends are right, remember that the
516                  * item data starts at the end of the leaf and grows towards the
517                  * front.
518                  */
519                 if (btrfs_item_offset_nr(leaf, slot) !=
520                         btrfs_item_end_nr(leaf, slot + 1)) {
521                         CORRUPT("slot offset bad", leaf, root, slot);
522                         return -EIO;
523                 }
524
525                 /*
526                  * Check to make sure that we don't point outside of the leaf,
527          * just in case all the items are consistent with each other, but
528                  * all point outside of the leaf.
529                  */
530                 if (btrfs_item_end_nr(leaf, slot) >
531                     BTRFS_LEAF_DATA_SIZE(root)) {
532                         CORRUPT("slot end outside of leaf", leaf, root, slot);
533                         return -EIO;
534                 }
535         }
536
537         return 0;
538 }
539
540 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
541                                struct extent_state *state)
542 {
543         struct extent_io_tree *tree;
544         u64 found_start;
545         int found_level;
546         unsigned long len;
547         struct extent_buffer *eb;
548         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
549         int ret = 0;
550
551         tree = &BTRFS_I(page->mapping->host)->io_tree;
552         if (page->private == EXTENT_PAGE_PRIVATE)
553                 goto out;
554         if (!page->private)
555                 goto out;
556
557         len = page->private >> 2;
558         WARN_ON(len == 0);
559
560         eb = alloc_extent_buffer(tree, start, len, page);
561         if (eb == NULL) {
562                 ret = -EIO;
563                 goto out;
564         }
565
566         found_start = btrfs_header_bytenr(eb);
567         if (found_start != start) {
568                 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
569                                "%llu %llu\n",
570                                (unsigned long long)found_start,
571                                (unsigned long long)eb->start);
572                 ret = -EIO;
573                 goto err;
574         }
575         if (eb->first_page != page) {
576                 printk(KERN_INFO "btrfs bad first page %lu %lu\n",
577                        eb->first_page->index, page->index);
578                 WARN_ON(1);
579                 ret = -EIO;
580                 goto err;
581         }
582         if (check_tree_block_fsid(root, eb)) {
583                 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
584                                (unsigned long long)eb->start);
585                 ret = -EIO;
586                 goto err;
587         }
588         found_level = btrfs_header_level(eb);
589
590         btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
591                                        eb, found_level);
592
593         ret = csum_tree_block(root, eb, 1);
594         if (ret) {
595                 ret = -EIO;
596                 goto err;
597         }
598
599         /*
600          * If this is a leaf block and it is corrupt, set the corrupt bit so
601          * that we don't try and read the other copies of this block, just
602          * return -EIO.
603          */
604         if (found_level == 0 && check_leaf(root, eb)) {
605                 set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
606                 ret = -EIO;
607         }
608
609         end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
610         end = eb->start + end - 1;
611 err:
612         free_extent_buffer(eb);
613 out:
614         return ret;
615 }
616
617 static void end_workqueue_bio(struct bio *bio, int err)
618 {
619         struct end_io_wq *end_io_wq = bio->bi_private;
620         struct btrfs_fs_info *fs_info;
621
622         fs_info = end_io_wq->info;
623         end_io_wq->error = err;
624         end_io_wq->work.func = end_workqueue_fn;
625         end_io_wq->work.flags = 0;
626
627         if (bio->bi_rw & REQ_WRITE) {
628                 if (end_io_wq->metadata == 1)
629                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
630                                            &end_io_wq->work);
631                 else if (end_io_wq->metadata == 2)
632                         btrfs_queue_worker(&fs_info->endio_freespace_worker,
633                                            &end_io_wq->work);
634                 else
635                         btrfs_queue_worker(&fs_info->endio_write_workers,
636                                            &end_io_wq->work);
637         } else {
638                 if (end_io_wq->metadata)
639                         btrfs_queue_worker(&fs_info->endio_meta_workers,
640                                            &end_io_wq->work);
641                 else
642                         btrfs_queue_worker(&fs_info->endio_workers,
643                                            &end_io_wq->work);
644         }
645 }
646
647 /*
648  * For the metadata arg you want
649  *
650  * 0 - if data
651  * 1 - if normal metadata
652  * 2 - if writing to the free space cache area
653  */
654 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
655                         int metadata)
656 {
657         struct end_io_wq *end_io_wq;
658         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
659         if (!end_io_wq)
660                 return -ENOMEM;
661
662         end_io_wq->private = bio->bi_private;
663         end_io_wq->end_io = bio->bi_end_io;
664         end_io_wq->info = info;
665         end_io_wq->error = 0;
666         end_io_wq->bio = bio;
667         end_io_wq->metadata = metadata;
668
669         bio->bi_private = end_io_wq;
670         bio->bi_end_io = end_workqueue_bio;
671         return 0;
672 }
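/*
 * Typical callers pass the metadata value documented above: the btree
 * submit hook does btrfs_bio_wq_end_io(fs_info, bio, 1) so completed
 * metadata bios are routed to the endio_meta workers, while free space
 * cache writeback passes 2 so its completions land on
 * endio_freespace_worker.
 */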
673
674 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
675 {
676         unsigned long limit = min_t(unsigned long,
677                                     info->workers.max_workers,
678                                     info->fs_devices->open_devices);
679         return 256 * limit;
680 }
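/*
 * Worked example (example values): with 4 open devices and a worker pool
 * capped at 8 threads, the limit above is 256 * min(8, 4) = 1024 in-flight
 * async submits; run_one_async_done() wakes waiters again once the count
 * drops below 2/3 of that.
 */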
681
682 static void run_one_async_start(struct btrfs_work *work)
683 {
684         struct async_submit_bio *async;
685
686         async = container_of(work, struct  async_submit_bio, work);
687         async->submit_bio_start(async->inode, async->rw, async->bio,
688                                async->mirror_num, async->bio_flags,
689                                async->bio_offset);
690 }
691
692 static void run_one_async_done(struct btrfs_work *work)
693 {
694         struct btrfs_fs_info *fs_info;
695         struct async_submit_bio *async;
696         int limit;
697
698         async = container_of(work, struct  async_submit_bio, work);
699         fs_info = BTRFS_I(async->inode)->root->fs_info;
700
701         limit = btrfs_async_submit_limit(fs_info);
702         limit = limit * 2 / 3;
703
704         atomic_dec(&fs_info->nr_async_submits);
705
706         if (atomic_read(&fs_info->nr_async_submits) < limit &&
707             waitqueue_active(&fs_info->async_submit_wait))
708                 wake_up(&fs_info->async_submit_wait);
709
710         async->submit_bio_done(async->inode, async->rw, async->bio,
711                                async->mirror_num, async->bio_flags,
712                                async->bio_offset);
713 }
714
715 static void run_one_async_free(struct btrfs_work *work)
716 {
717         struct async_submit_bio *async;
718
719         async = container_of(work, struct  async_submit_bio, work);
720         kfree(async);
721 }
722
723 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
724                         int rw, struct bio *bio, int mirror_num,
725                         unsigned long bio_flags,
726                         u64 bio_offset,
727                         extent_submit_bio_hook_t *submit_bio_start,
728                         extent_submit_bio_hook_t *submit_bio_done)
729 {
730         struct async_submit_bio *async;
731
732         async = kmalloc(sizeof(*async), GFP_NOFS);
733         if (!async)
734                 return -ENOMEM;
735
736         async->inode = inode;
737         async->rw = rw;
738         async->bio = bio;
739         async->mirror_num = mirror_num;
740         async->submit_bio_start = submit_bio_start;
741         async->submit_bio_done = submit_bio_done;
742
743         async->work.func = run_one_async_start;
744         async->work.ordered_func = run_one_async_done;
745         async->work.ordered_free = run_one_async_free;
746
747         async->work.flags = 0;
748         async->bio_flags = bio_flags;
749         async->bio_offset = bio_offset;
750
751         atomic_inc(&fs_info->nr_async_submits);
752
753         if (rw & REQ_SYNC)
754                 btrfs_set_work_high_prio(&async->work);
755
756         btrfs_queue_worker(&fs_info->workers, &async->work);
757
758         while (atomic_read(&fs_info->async_submit_draining) &&
759               atomic_read(&fs_info->nr_async_submits)) {
760                 wait_event(fs_info->async_submit_wait,
761                            (atomic_read(&fs_info->nr_async_submits) == 0));
762         }
763
764         return 0;
765 }
766
767 static int btree_csum_one_bio(struct bio *bio)
768 {
769         struct bio_vec *bvec = bio->bi_io_vec;
770         int bio_index = 0;
771         struct btrfs_root *root;
772
773         WARN_ON(bio->bi_vcnt <= 0);
774         while (bio_index < bio->bi_vcnt) {
775                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
776                 csum_dirty_buffer(root, bvec->bv_page);
777                 bio_index++;
778                 bvec++;
779         }
780         return 0;
781 }
782
783 static int __btree_submit_bio_start(struct inode *inode, int rw,
784                                     struct bio *bio, int mirror_num,
785                                     unsigned long bio_flags,
786                                     u64 bio_offset)
787 {
788         /*
789          * when we're called for a write, we're already in the async
790          * submission context.  Checksum the bio here; __btree_submit_bio_done maps and submits it
791          */
792         btree_csum_one_bio(bio);
793         return 0;
794 }
795
796 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
797                                  int mirror_num, unsigned long bio_flags,
798                                  u64 bio_offset)
799 {
800         /*
801          * when we're called for a write, we're already in the async
802          * submission context.  Just jump into btrfs_map_bio
803          */
804         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
805 }
806
807 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
808                                  int mirror_num, unsigned long bio_flags,
809                                  u64 bio_offset)
810 {
811         int ret;
812
813         ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
814                                           bio, 1);
815         BUG_ON(ret);
816
817         if (!(rw & REQ_WRITE)) {
818                 /*
819                  * called for a read, do the setup so that checksum validation
820                  * can happen in the async kernel threads
821                  */
822                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
823                                      mirror_num, 0);
824         }
825
826         /*
827          * kthread helpers are used to submit writes so that checksumming
828          * can happen in parallel across all CPUs
829          */
830         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
831                                    inode, rw, bio, mirror_num, 0,
832                                    bio_offset,
833                                    __btree_submit_bio_start,
834                                    __btree_submit_bio_done);
835 }
836
837 #ifdef CONFIG_MIGRATION
838 static int btree_migratepage(struct address_space *mapping,
839                         struct page *newpage, struct page *page)
840 {
841         /*
842          * we can't safely write a btree page from here,
843          * we haven't done the locking hook
844          */
845         if (PageDirty(page))
846                 return -EAGAIN;
847         /*
848          * Buffers may be managed in a filesystem specific way.
849          * We must have no buffers or drop them.
850          */
851         if (page_has_private(page) &&
852             !try_to_release_page(page, GFP_KERNEL))
853                 return -EAGAIN;
854         return migrate_page(mapping, newpage, page);
855 }
856 #endif
857
858 static int btree_writepage(struct page *page, struct writeback_control *wbc)
859 {
860         struct extent_io_tree *tree;
861         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
862         struct extent_buffer *eb;
863         int was_dirty;
864
865         tree = &BTRFS_I(page->mapping->host)->io_tree;
866         if (!(current->flags & PF_MEMALLOC)) {
867                 return extent_write_full_page(tree, page,
868                                               btree_get_extent, wbc);
869         }
870
871         redirty_page_for_writepage(wbc, page);
872         eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
873         WARN_ON(!eb);
874
875         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
876         if (!was_dirty) {
877                 spin_lock(&root->fs_info->delalloc_lock);
878                 root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
879                 spin_unlock(&root->fs_info->delalloc_lock);
880         }
881         free_extent_buffer(eb);
882
883         unlock_page(page);
884         return 0;
885 }
886
887 static int btree_writepages(struct address_space *mapping,
888                             struct writeback_control *wbc)
889 {
890         struct extent_io_tree *tree;
891         tree = &BTRFS_I(mapping->host)->io_tree;
892         if (wbc->sync_mode == WB_SYNC_NONE) {
893                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
894                 u64 num_dirty;
895                 unsigned long thresh = 32 * 1024 * 1024;
896
897                 if (wbc->for_kupdate)
898                         return 0;
899
900                 /* this is a bit racy, but that's ok */
901                 num_dirty = root->fs_info->dirty_metadata_bytes;
902                 if (num_dirty < thresh)
903                         return 0;
904         }
905         return extent_writepages(tree, mapping, btree_get_extent, wbc);
906 }
907
908 static int btree_readpage(struct file *file, struct page *page)
909 {
910         struct extent_io_tree *tree;
911         tree = &BTRFS_I(page->mapping->host)->io_tree;
912         return extent_read_full_page(tree, page, btree_get_extent);
913 }
914
915 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
916 {
917         struct extent_io_tree *tree;
918         struct extent_map_tree *map;
919         int ret;
920
921         if (PageWriteback(page) || PageDirty(page))
922                 return 0;
923
924         tree = &BTRFS_I(page->mapping->host)->io_tree;
925         map = &BTRFS_I(page->mapping->host)->extent_tree;
926
927         ret = try_release_extent_state(map, tree, page, gfp_flags);
928         if (!ret)
929                 return 0;
930
931         ret = try_release_extent_buffer(tree, page);
932         if (ret == 1) {
933                 ClearPagePrivate(page);
934                 set_page_private(page, 0);
935                 page_cache_release(page);
936         }
937
938         return ret;
939 }
940
941 static void btree_invalidatepage(struct page *page, unsigned long offset)
942 {
943         struct extent_io_tree *tree;
944         tree = &BTRFS_I(page->mapping->host)->io_tree;
945         extent_invalidatepage(tree, page, offset);
946         btree_releasepage(page, GFP_NOFS);
947         if (PagePrivate(page)) {
948                 printk(KERN_WARNING "btrfs warning page private not zero "
949                        "on page %llu\n", (unsigned long long)page_offset(page));
950                 ClearPagePrivate(page);
951                 set_page_private(page, 0);
952                 page_cache_release(page);
953         }
954 }
955
956 static const struct address_space_operations btree_aops = {
957         .readpage       = btree_readpage,
958         .writepage      = btree_writepage,
959         .writepages     = btree_writepages,
960         .releasepage    = btree_releasepage,
961         .invalidatepage = btree_invalidatepage,
962 #ifdef CONFIG_MIGRATION
963         .migratepage    = btree_migratepage,
964 #endif
965 };
966
967 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
968                          u64 parent_transid)
969 {
970         struct extent_buffer *buf = NULL;
971         struct inode *btree_inode = root->fs_info->btree_inode;
972         int ret = 0;
973
974         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
975         if (!buf)
976                 return 0;
977         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
978                                  buf, 0, WAIT_NONE, btree_get_extent, 0);
979         free_extent_buffer(buf);
980         return ret;
981 }
982
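/*
 * Readahead variant of a tree block read: tag the buffer with
 * EXTENT_BUFFER_READAHEAD, start a non-blocking read (WAIT_PAGE_LOCK) and
 * hand the buffer back through @eb only if it is already uptodate.  A
 * buffer found corrupted returns -EIO; anything still in flight is simply
 * released again.
 */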
983 int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
984                          int mirror_num, struct extent_buffer **eb)
985 {
986         struct extent_buffer *buf = NULL;
987         struct inode *btree_inode = root->fs_info->btree_inode;
988         struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
989         int ret;
990
991         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
992         if (!buf)
993                 return 0;
994
995         set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
996
997         ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
998                                        btree_get_extent, mirror_num);
999         if (ret) {
1000                 free_extent_buffer(buf);
1001                 return ret;
1002         }
1003
1004         if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1005                 free_extent_buffer(buf);
1006                 return -EIO;
1007         } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
1008                 *eb = buf;
1009         } else {
1010                 free_extent_buffer(buf);
1011         }
1012         return 0;
1013 }
1014
1015 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
1016                                             u64 bytenr, u32 blocksize)
1017 {
1018         struct inode *btree_inode = root->fs_info->btree_inode;
1019         struct extent_buffer *eb;
1020         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1021                                 bytenr, blocksize);
1022         return eb;
1023 }
1024
1025 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1026                                                  u64 bytenr, u32 blocksize)
1027 {
1028         struct inode *btree_inode = root->fs_info->btree_inode;
1029         struct extent_buffer *eb;
1030
1031         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1032                                  bytenr, blocksize, NULL);
1033         return eb;
1034 }
1035
1036
1037 int btrfs_write_tree_block(struct extent_buffer *buf)
1038 {
1039         return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
1040                                         buf->start + buf->len - 1);
1041 }
1042
1043 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1044 {
1045         return filemap_fdatawait_range(buf->first_page->mapping,
1046                                        buf->start, buf->start + buf->len - 1);
1047 }
1048
1049 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1050                                       u32 blocksize, u64 parent_transid)
1051 {
1052         struct extent_buffer *buf = NULL;
1053         int ret;
1054
1055         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
1056         if (!buf)
1057                 return NULL;
1058
1059         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1060
1061         if (ret == 0)
1062                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
1063         return buf;
1064
1065 }
1066
1067 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1068                      struct extent_buffer *buf)
1069 {
1070         struct inode *btree_inode = root->fs_info->btree_inode;
1071         if (btrfs_header_generation(buf) ==
1072             root->fs_info->running_transaction->transid) {
1073                 btrfs_assert_tree_locked(buf);
1074
1075                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1076                         spin_lock(&root->fs_info->delalloc_lock);
1077                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
1078                                 root->fs_info->dirty_metadata_bytes -= buf->len;
1079                         else
1080                                 WARN_ON(1);
1081                         spin_unlock(&root->fs_info->delalloc_lock);
1082                 }
1083
1084                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1085                 btrfs_set_lock_blocking(buf);
1086                 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
1087                                           buf);
1088         }
1089         return 0;
1090 }
1091
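/*
 * Initialize the purely in-memory state of a btrfs_root: block sizes,
 * locks, lists, wait queues and log tree counters.  Nothing is read from
 * or written to disk here; callers fill in the root_item and root->node
 * afterwards.
 */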
1092 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1093                         u32 stripesize, struct btrfs_root *root,
1094                         struct btrfs_fs_info *fs_info,
1095                         u64 objectid)
1096 {
1097         root->node = NULL;
1098         root->commit_root = NULL;
1099         root->sectorsize = sectorsize;
1100         root->nodesize = nodesize;
1101         root->leafsize = leafsize;
1102         root->stripesize = stripesize;
1103         root->ref_cows = 0;
1104         root->track_dirty = 0;
1105         root->in_radix = 0;
1106         root->orphan_item_inserted = 0;
1107         root->orphan_cleanup_state = 0;
1108
1109         root->fs_info = fs_info;
1110         root->objectid = objectid;
1111         root->last_trans = 0;
1112         root->highest_objectid = 0;
1113         root->name = NULL;
1114         root->inode_tree = RB_ROOT;
1115         INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1116         root->block_rsv = NULL;
1117         root->orphan_block_rsv = NULL;
1118
1119         INIT_LIST_HEAD(&root->dirty_list);
1120         INIT_LIST_HEAD(&root->orphan_list);
1121         INIT_LIST_HEAD(&root->root_list);
1122         spin_lock_init(&root->orphan_lock);
1123         spin_lock_init(&root->inode_lock);
1124         spin_lock_init(&root->accounting_lock);
1125         mutex_init(&root->objectid_mutex);
1126         mutex_init(&root->log_mutex);
1127         init_waitqueue_head(&root->log_writer_wait);
1128         init_waitqueue_head(&root->log_commit_wait[0]);
1129         init_waitqueue_head(&root->log_commit_wait[1]);
1130         atomic_set(&root->log_commit[0], 0);
1131         atomic_set(&root->log_commit[1], 0);
1132         atomic_set(&root->log_writers, 0);
1133         root->log_batch = 0;
1134         root->log_transid = 0;
1135         root->last_log_commit = 0;
1136         extent_io_tree_init(&root->dirty_log_pages,
1137                              fs_info->btree_inode->i_mapping);
1138
1139         memset(&root->root_key, 0, sizeof(root->root_key));
1140         memset(&root->root_item, 0, sizeof(root->root_item));
1141         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
1142         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
1143         root->defrag_trans_start = fs_info->generation;
1144         init_completion(&root->kobj_unregister);
1145         root->defrag_running = 0;
1146         root->root_key.objectid = objectid;
1147         root->anon_dev = 0;
1148         return 0;
1149 }
1150
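/*
 * Look up the most recent root item for @objectid in the tree root and
 * read its root node from disk.  Returns -ENOENT if no such root item
 * exists and -EIO if the root node can't be read back uptodate.
 */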
1151 static int find_and_setup_root(struct btrfs_root *tree_root,
1152                                struct btrfs_fs_info *fs_info,
1153                                u64 objectid,
1154                                struct btrfs_root *root)
1155 {
1156         int ret;
1157         u32 blocksize;
1158         u64 generation;
1159
1160         __setup_root(tree_root->nodesize, tree_root->leafsize,
1161                      tree_root->sectorsize, tree_root->stripesize,
1162                      root, fs_info, objectid);
1163         ret = btrfs_find_last_root(tree_root, objectid,
1164                                    &root->root_item, &root->root_key);
1165         if (ret > 0)
1166                 return -ENOENT;
1167         BUG_ON(ret);
1168
1169         generation = btrfs_root_generation(&root->root_item);
1170         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1171         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1172                                      blocksize, generation);
1173         if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
1174                 free_extent_buffer(root->node);
1175                 return -EIO;
1176         }
1177         root->commit_root = btrfs_root_node(root);
1178         return 0;
1179 }
1180
1181 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1182                                          struct btrfs_fs_info *fs_info)
1183 {
1184         struct btrfs_root *root;
1185         struct btrfs_root *tree_root = fs_info->tree_root;
1186         struct extent_buffer *leaf;
1187
1188         root = kzalloc(sizeof(*root), GFP_NOFS);
1189         if (!root)
1190                 return ERR_PTR(-ENOMEM);
1191
1192         __setup_root(tree_root->nodesize, tree_root->leafsize,
1193                      tree_root->sectorsize, tree_root->stripesize,
1194                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1195
1196         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1197         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1198         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1199         /*
1200          * log trees do not get reference counted because they go away
1201          * before a real commit is actually done.  They do store pointers
1202          * to file data extents, and those reference counts still get
1203          * updated (along with back refs to the log tree).
1204          */
1205         root->ref_cows = 0;
1206
1207         leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1208                                       BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
1209         if (IS_ERR(leaf)) {
1210                 kfree(root);
1211                 return ERR_CAST(leaf);
1212         }
1213
1214         memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
1215         btrfs_set_header_bytenr(leaf, leaf->start);
1216         btrfs_set_header_generation(leaf, trans->transid);
1217         btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
1218         btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
1219         root->node = leaf;
1220
1221         write_extent_buffer(root->node, root->fs_info->fsid,
1222                             (unsigned long)btrfs_header_fsid(root->node),
1223                             BTRFS_FSID_SIZE);
1224         btrfs_mark_buffer_dirty(root->node);
1225         btrfs_tree_unlock(root->node);
1226         return root;
1227 }
1228
1229 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1230                              struct btrfs_fs_info *fs_info)
1231 {
1232         struct btrfs_root *log_root;
1233
1234         log_root = alloc_log_tree(trans, fs_info);
1235         if (IS_ERR(log_root))
1236                 return PTR_ERR(log_root);
1237         WARN_ON(fs_info->log_root_tree);
1238         fs_info->log_root_tree = log_root;
1239         return 0;
1240 }
1241
1242 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1243                        struct btrfs_root *root)
1244 {
1245         struct btrfs_root *log_root;
1246         struct btrfs_inode_item *inode_item;
1247
1248         log_root = alloc_log_tree(trans, root->fs_info);
1249         if (IS_ERR(log_root))
1250                 return PTR_ERR(log_root);
1251
1252         log_root->last_trans = trans->transid;
1253         log_root->root_key.offset = root->root_key.objectid;
1254
1255         inode_item = &log_root->root_item.inode;
1256         inode_item->generation = cpu_to_le64(1);
1257         inode_item->size = cpu_to_le64(3);
1258         inode_item->nlink = cpu_to_le32(1);
1259         inode_item->nbytes = cpu_to_le64(root->leafsize);
1260         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1261
1262         btrfs_set_root_node(&log_root->root_item, log_root->node);
1263
1264         WARN_ON(root->log_root);
1265         root->log_root = log_root;
1266         root->log_transid = 0;
1267         root->last_log_commit = 0;
1268         return 0;
1269 }
1270
1271 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1272                                                struct btrfs_key *location)
1273 {
1274         struct btrfs_root *root;
1275         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1276         struct btrfs_path *path;
1277         struct extent_buffer *l;
1278         u64 generation;
1279         u32 blocksize;
1280         int ret = 0;
1281
1282         root = kzalloc(sizeof(*root), GFP_NOFS);
1283         if (!root)
1284                 return ERR_PTR(-ENOMEM);
1285         if (location->offset == (u64)-1) {
1286                 ret = find_and_setup_root(tree_root, fs_info,
1287                                           location->objectid, root);
1288                 if (ret) {
1289                         kfree(root);
1290                         return ERR_PTR(ret);
1291                 }
1292                 goto out;
1293         }
1294
1295         __setup_root(tree_root->nodesize, tree_root->leafsize,
1296                      tree_root->sectorsize, tree_root->stripesize,
1297                      root, fs_info, location->objectid);
1298
1299         path = btrfs_alloc_path();
1300         if (!path) {
1301                 kfree(root);
1302                 return ERR_PTR(-ENOMEM);
1303         }
1304         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1305         if (ret == 0) {
1306                 l = path->nodes[0];
1307                 read_extent_buffer(l, &root->root_item,
1308                                 btrfs_item_ptr_offset(l, path->slots[0]),
1309                                 sizeof(root->root_item));
1310                 memcpy(&root->root_key, location, sizeof(*location));
1311         }
1312         btrfs_free_path(path);
1313         if (ret) {
1314                 kfree(root);
1315                 if (ret > 0)
1316                         ret = -ENOENT;
1317                 return ERR_PTR(ret);
1318         }
1319
1320         generation = btrfs_root_generation(&root->root_item);
1321         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1322         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1323                                      blocksize, generation);
1324         root->commit_root = btrfs_root_node(root);
1325         BUG_ON(!root->node);
1326 out:
1327         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1328                 root->ref_cows = 1;
1329                 btrfs_check_and_init_root_item(&root->root_item);
1330         }
1331
1332         return root;
1333 }
1334
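/*
 * Resolve a root key to an in-memory root.  The well known roots (tree,
 * extent, chunk, dev and csum) are returned directly from fs_info; any
 * other root is first looked up in fs_roots_radix and, on a miss, read
 * from disk, given its free-inode caches and anonymous bdev, and inserted
 * into the radix tree (retrying the lookup if someone else won the race).
 */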
1335 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1336                                               struct btrfs_key *location)
1337 {
1338         struct btrfs_root *root;
1339         int ret;
1340
1341         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1342                 return fs_info->tree_root;
1343         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1344                 return fs_info->extent_root;
1345         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1346                 return fs_info->chunk_root;
1347         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1348                 return fs_info->dev_root;
1349         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1350                 return fs_info->csum_root;
1351 again:
1352         spin_lock(&fs_info->fs_roots_radix_lock);
1353         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1354                                  (unsigned long)location->objectid);
1355         spin_unlock(&fs_info->fs_roots_radix_lock);
1356         if (root)
1357                 return root;
1358
1359         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1360         if (IS_ERR(root))
1361                 return root;
1362
1363         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1364         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1365                                         GFP_NOFS);
1366         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1367                 ret = -ENOMEM;
1368                 goto fail;
1369         }
1370
1371         btrfs_init_free_ino_ctl(root);
1372         mutex_init(&root->fs_commit_mutex);
1373         spin_lock_init(&root->cache_lock);
1374         init_waitqueue_head(&root->cache_wait);
1375
1376         ret = get_anon_bdev(&root->anon_dev);
1377         if (ret)
1378                 goto fail;
1379
1380         if (btrfs_root_refs(&root->root_item) == 0) {
1381                 ret = -ENOENT;
1382                 goto fail;
1383         }
1384
1385         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1386         if (ret < 0)
1387                 goto fail;
1388         if (ret == 0)
1389                 root->orphan_item_inserted = 1;
1390
1391         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1392         if (ret)
1393                 goto fail;
1394
1395         spin_lock(&fs_info->fs_roots_radix_lock);
1396         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1397                                 (unsigned long)root->root_key.objectid,
1398                                 root);
1399         if (ret == 0)
1400                 root->in_radix = 1;
1401
1402         spin_unlock(&fs_info->fs_roots_radix_lock);
1403         radix_tree_preload_end();
1404         if (ret) {
1405                 if (ret == -EEXIST) {
1406                         free_fs_root(root);
1407                         goto again;
1408                 }
1409                 goto fail;
1410         }
1411
1412         ret = btrfs_find_dead_roots(fs_info->tree_root,
1413                                     root->root_key.objectid);
1414         WARN_ON(ret);
1415         return root;
1416 fail:
1417         free_fs_root(root);
1418         return ERR_PTR(ret);
1419 }
1420
1421 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1422 {
1423         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1424         int ret = 0;
1425         struct btrfs_device *device;
1426         struct backing_dev_info *bdi;
1427
1428         rcu_read_lock();
1429         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1430                 if (!device->bdev)
1431                         continue;
1432                 bdi = blk_get_backing_dev_info(device->bdev);
1433                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1434                         ret = 1;
1435                         break;
1436                 }
1437         }
1438         rcu_read_unlock();
1439         return ret;
1440 }
1441
1442 /*
1443  * If this fails, caller must call bdi_destroy() to get rid of the
1444  * bdi again.
1445  */
1446 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1447 {
1448         int err;
1449
1450         bdi->capabilities = BDI_CAP_MAP_COPY;
1451         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1452         if (err)
1453                 return err;
1454
1455         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1456         bdi->congested_fn       = btrfs_congested_fn;
1457         bdi->congested_data     = info;
1458         return 0;
1459 }
1460
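/*
 * Decide whether the tree block touched by this bio can be checksummed
 * right away: return 1 when the block is fully covered by the bio, or
 * check whether the rest of its range is already uptodate in the io tree.
 */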
1461 static int bio_ready_for_csum(struct bio *bio)
1462 {
1463         u64 length = 0;
1464         u64 buf_len = 0;
1465         u64 start = 0;
1466         struct page *page;
1467         struct extent_io_tree *io_tree = NULL;
1468         struct bio_vec *bvec;
1469         int i;
1470         int ret;
1471
1472         bio_for_each_segment(bvec, bio, i) {
1473                 page = bvec->bv_page;
1474                 if (page->private == EXTENT_PAGE_PRIVATE) {
1475                         length += bvec->bv_len;
1476                         continue;
1477                 }
1478                 if (!page->private) {
1479                         length += bvec->bv_len;
1480                         continue;
1481                 }
1482                 length = bvec->bv_len;
1483                 buf_len = page->private >> 2;
1484                 start = page_offset(page) + bvec->bv_offset;
1485                 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1486         }
1487         /* are we fully contained in this bio? */
1488         if (buf_len <= length)
1489                 return 1;
1490
1491         ret = extent_range_uptodate(io_tree, start + length,
1492                                     start + buf_len - 1);
1493         return ret;
1494 }
1495
1496 /*
1497  * called by the kthread helper functions to finally call the bio end_io
1498  * functions.  This is where read checksum verification actually happens
1499  */
1500 static void end_workqueue_fn(struct btrfs_work *work)
1501 {
1502         struct bio *bio;
1503         struct end_io_wq *end_io_wq;
1504         struct btrfs_fs_info *fs_info;
1505         int error;
1506
1507         end_io_wq = container_of(work, struct end_io_wq, work);
1508         bio = end_io_wq->bio;
1509         fs_info = end_io_wq->info;
1510
1511         /* metadata bio reads are special because the whole tree block must
1512          * be checksummed at once.  This makes sure the entire block is in
1513          * RAM and up to date before trying to verify things.  For
1514          * blocksize <= pagesize, it is basically a no-op.
1515          */
1516         if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
1517             !bio_ready_for_csum(bio)) {
1518                 btrfs_queue_worker(&fs_info->endio_meta_workers,
1519                                    &end_io_wq->work);
1520                 return;
1521         }
1522         error = end_io_wq->error;
1523         bio->bi_private = end_io_wq->private;
1524         bio->bi_end_io = end_io_wq->end_io;
1525         kfree(end_io_wq);
1526         bio_endio(bio, error);
1527 }
1528
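/*
 * The cleaner kthread: on a writable filesystem it periodically runs the
 * delayed iputs, deletes old snapshots and defrags the inodes queued for
 * auto-defrag, then sleeps until it is woken up again.
 */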
1529 static int cleaner_kthread(void *arg)
1530 {
1531         struct btrfs_root *root = arg;
1532
1533         do {
1534                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1535
1536                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1537                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1538                         btrfs_run_delayed_iputs(root);
1539                         btrfs_clean_old_snapshots(root);
1540                         mutex_unlock(&root->fs_info->cleaner_mutex);
1541                         btrfs_run_defrag_inodes(root->fs_info);
1542                 }
1543
1544                 if (freezing(current)) {
1545                         refrigerator();
1546                 } else {
1547                         set_current_state(TASK_INTERRUPTIBLE);
1548                         if (!kthread_should_stop())
1549                                 schedule();
1550                         __set_current_state(TASK_RUNNING);
1551                 }
1552         } while (!kthread_should_stop());
1553         return 0;
1554 }
1555
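/*
 * The transaction kthread: wakes up roughly every 30 seconds (or every 5
 * seconds while the running transaction is still young) and commits the
 * running transaction once it is old enough or has been marked blocked.
 * It wakes the cleaner kthread after every pass.
 */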
1556 static int transaction_kthread(void *arg)
1557 {
1558         struct btrfs_root *root = arg;
1559         struct btrfs_trans_handle *trans;
1560         struct btrfs_transaction *cur;
1561         u64 transid;
1562         unsigned long now;
1563         unsigned long delay;
1564         int ret;
1565
1566         do {
1567                 delay = HZ * 30;
1568                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1569                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1570
1571                 spin_lock(&root->fs_info->trans_lock);
1572                 cur = root->fs_info->running_transaction;
1573                 if (!cur) {
1574                         spin_unlock(&root->fs_info->trans_lock);
1575                         goto sleep;
1576                 }
1577
1578                 now = get_seconds();
1579                 if (!cur->blocked &&
1580                     (now < cur->start_time || now - cur->start_time < 30)) {
1581                         spin_unlock(&root->fs_info->trans_lock);
1582                         delay = HZ * 5;
1583                         goto sleep;
1584                 }
1585                 transid = cur->transid;
1586                 spin_unlock(&root->fs_info->trans_lock);
1587
1588                 trans = btrfs_join_transaction(root);
1589                 BUG_ON(IS_ERR(trans));
1590                 if (transid == trans->transid) {
1591                         ret = btrfs_commit_transaction(trans, root);
1592                         BUG_ON(ret);
1593                 } else {
1594                         btrfs_end_transaction(trans, root);
1595                 }
1596 sleep:
1597                 wake_up_process(root->fs_info->cleaner_kthread);
1598                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1599
1600                 if (freezing(current)) {
1601                         refrigerator();
1602                 } else {
1603                         set_current_state(TASK_INTERRUPTIBLE);
1604                         if (!kthread_should_stop() &&
1605                             !btrfs_transaction_blocked(root->fs_info))
1606                                 schedule_timeout(delay);
1607                         __set_current_state(TASK_RUNNING);
1608                 }
1609         } while (!kthread_should_stop());
1610         return 0;
1611 }
1612
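/*
 * open_ctree - mount-time setup of a btrfs filesystem
 *
 * Allocates the in-memory roots and fs_info state, reads the super block,
 * chunk tree and tree roots from disk, starts the worker pools and the
 * cleaner/transaction kthreads, replays the tree log if one exists and
 * finally looks up the default fs root.  Returns the tree root on success
 * or an ERR_PTR on failure.
 */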
1613 struct btrfs_root *open_ctree(struct super_block *sb,
1614                               struct btrfs_fs_devices *fs_devices,
1615                               char *options)
1616 {
1617         u32 sectorsize;
1618         u32 nodesize;
1619         u32 leafsize;
1620         u32 blocksize;
1621         u32 stripesize;
1622         u64 generation;
1623         u64 features;
1624         struct btrfs_key location;
1625         struct buffer_head *bh;
1626         struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1627                                                  GFP_NOFS);
1628         struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1629                                                  GFP_NOFS);
1630         struct btrfs_root *tree_root = btrfs_sb(sb);
1631         struct btrfs_fs_info *fs_info = NULL;
1632         struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1633                                                 GFP_NOFS);
1634         struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1635                                               GFP_NOFS);
1636         struct btrfs_root *log_tree_root;
1637
1638         int ret;
1639         int err = -EINVAL;
1640
1641         struct btrfs_super_block *disk_super;
1642
1643         if (!extent_root || !tree_root || !tree_root->fs_info ||
1644             !chunk_root || !dev_root || !csum_root) {
1645                 err = -ENOMEM;
1646                 goto fail;
1647         }
1648         fs_info = tree_root->fs_info;
1649
1650         ret = init_srcu_struct(&fs_info->subvol_srcu);
1651         if (ret) {
1652                 err = ret;
1653                 goto fail;
1654         }
1655
1656         ret = setup_bdi(fs_info, &fs_info->bdi);
1657         if (ret) {
1658                 err = ret;
1659                 goto fail_srcu;
1660         }
1661
1662         fs_info->btree_inode = new_inode(sb);
1663         if (!fs_info->btree_inode) {
1664                 err = -ENOMEM;
1665                 goto fail_bdi;
1666         }
1667
1668         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1669
1670         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1671         INIT_LIST_HEAD(&fs_info->trans_list);
1672         INIT_LIST_HEAD(&fs_info->dead_roots);
1673         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1674         INIT_LIST_HEAD(&fs_info->hashers);
1675         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1676         INIT_LIST_HEAD(&fs_info->ordered_operations);
1677         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1678         spin_lock_init(&fs_info->delalloc_lock);
1679         spin_lock_init(&fs_info->trans_lock);
1680         spin_lock_init(&fs_info->ref_cache_lock);
1681         spin_lock_init(&fs_info->fs_roots_radix_lock);
1682         spin_lock_init(&fs_info->delayed_iput_lock);
1683         spin_lock_init(&fs_info->defrag_inodes_lock);
1684         mutex_init(&fs_info->reloc_mutex);
1685
1686         init_completion(&fs_info->kobj_unregister);
1687         fs_info->tree_root = tree_root;
1688         fs_info->extent_root = extent_root;
1689         fs_info->csum_root = csum_root;
1690         fs_info->chunk_root = chunk_root;
1691         fs_info->dev_root = dev_root;
1692         fs_info->fs_devices = fs_devices;
1693         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1694         INIT_LIST_HEAD(&fs_info->space_info);
1695         btrfs_mapping_init(&fs_info->mapping_tree);
1696         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1697         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1698         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1699         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1700         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1701         INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
1702         mutex_init(&fs_info->durable_block_rsv_mutex);
1703         atomic_set(&fs_info->nr_async_submits, 0);
1704         atomic_set(&fs_info->async_delalloc_pages, 0);
1705         atomic_set(&fs_info->async_submit_draining, 0);
1706         atomic_set(&fs_info->nr_async_bios, 0);
1707         atomic_set(&fs_info->defrag_running, 0);
1708         fs_info->sb = sb;
1709         fs_info->max_inline = 8192 * 1024;
1710         fs_info->metadata_ratio = 0;
1711         fs_info->defrag_inodes = RB_ROOT;
1712         fs_info->trans_no_join = 0;
1713
1714         fs_info->thread_pool_size = min_t(unsigned long,
1715                                           num_online_cpus() + 2, 8);
1716
1717         INIT_LIST_HEAD(&fs_info->ordered_extents);
1718         spin_lock_init(&fs_info->ordered_extent_lock);
1719         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
1720                                         GFP_NOFS);
1721         if (!fs_info->delayed_root) {
1722                 err = -ENOMEM;
1723                 goto fail_iput;
1724         }
1725         btrfs_init_delayed_root(fs_info->delayed_root);
1726
1727         mutex_init(&fs_info->scrub_lock);
1728         atomic_set(&fs_info->scrubs_running, 0);
1729         atomic_set(&fs_info->scrub_pause_req, 0);
1730         atomic_set(&fs_info->scrubs_paused, 0);
1731         atomic_set(&fs_info->scrub_cancel_req, 0);
1732         init_waitqueue_head(&fs_info->scrub_pause_wait);
1733         init_rwsem(&fs_info->scrub_super_lock);
1734         fs_info->scrub_workers_refcnt = 0;
1735
1736         sb->s_blocksize = 4096;
1737         sb->s_blocksize_bits = blksize_bits(4096);
1738         sb->s_bdi = &fs_info->bdi;
1739
1740         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1741         fs_info->btree_inode->i_nlink = 1;
1742         /*
1743          * we set the i_size on the btree inode to the max possible offset.
1744          * the real end of the address space is determined by all of
1745          * the devices in the system
1746          */
1747         fs_info->btree_inode->i_size = OFFSET_MAX;
1748         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1749         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1750
1751         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
1752         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1753                              fs_info->btree_inode->i_mapping);
1754         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
1755
1756         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1757
1758         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1759         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1760                sizeof(struct btrfs_key));
1761         BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
1762         insert_inode_hash(fs_info->btree_inode);
1763
1764         spin_lock_init(&fs_info->block_group_cache_lock);
1765         fs_info->block_group_cache_tree = RB_ROOT;
1766
1767         extent_io_tree_init(&fs_info->freed_extents[0],
1768                              fs_info->btree_inode->i_mapping);
1769         extent_io_tree_init(&fs_info->freed_extents[1],
1770                              fs_info->btree_inode->i_mapping);
1771         fs_info->pinned_extents = &fs_info->freed_extents[0];
1772         fs_info->do_barriers = 1;
1773
1774
1775         mutex_init(&fs_info->ordered_operations_mutex);
1776         mutex_init(&fs_info->tree_log_mutex);
1777         mutex_init(&fs_info->chunk_mutex);
1778         mutex_init(&fs_info->transaction_kthread_mutex);
1779         mutex_init(&fs_info->cleaner_mutex);
1780         mutex_init(&fs_info->volume_mutex);
1781         init_rwsem(&fs_info->extent_commit_sem);
1782         init_rwsem(&fs_info->cleanup_work_sem);
1783         init_rwsem(&fs_info->subvol_sem);
1784
1785         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1786         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1787
1788         init_waitqueue_head(&fs_info->transaction_throttle);
1789         init_waitqueue_head(&fs_info->transaction_wait);
1790         init_waitqueue_head(&fs_info->transaction_blocked_wait);
1791         init_waitqueue_head(&fs_info->async_submit_wait);
1792
1793         __setup_root(4096, 4096, 4096, 4096, tree_root,
1794                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1795
1796         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1797         if (!bh) {
1798                 err = -EINVAL;
1799                 goto fail_alloc;
1800         }
1801
1802         memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1803         memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1804                sizeof(fs_info->super_for_commit));
1805         brelse(bh);
1806
1807         memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1808
1809         disk_super = &fs_info->super_copy;
1810         if (!btrfs_super_root(disk_super))
1811                 goto fail_alloc;
1812
1813         /* check FS state, whether FS is broken. */
1814         fs_info->fs_state |= btrfs_super_flags(disk_super);
1815
1816         btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
1817
1818         /*
1819          * In the long term, we'll store the compression type in the super
1820          * block, and it'll be used for per file compression control.
1821          */
1822         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
1823
1824         ret = btrfs_parse_options(tree_root, options);
1825         if (ret) {
1826                 err = ret;
1827                 goto fail_alloc;
1828         }
1829
1830         features = btrfs_super_incompat_flags(disk_super) &
1831                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1832         if (features) {
1833                 printk(KERN_ERR "BTRFS: couldn't mount because of "
1834                        "unsupported optional features (%Lx).\n",
1835                        (unsigned long long)features);
1836                 err = -EINVAL;
1837                 goto fail_alloc;
1838         }
1839
1840         features = btrfs_super_incompat_flags(disk_super);
1841         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
1842         if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
1843                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
1844         btrfs_set_super_incompat_flags(disk_super, features);
1845
1846         features = btrfs_super_compat_ro_flags(disk_super) &
1847                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1848         if (!(sb->s_flags & MS_RDONLY) && features) {
1849                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1850                        "unsupported option features (%Lx).\n",
1851                        (unsigned long long)features);
1852                 err = -EINVAL;
1853                 goto fail_alloc;
1854         }
1855
1856         btrfs_init_workers(&fs_info->generic_worker,
1857                            "genwork", 1, NULL);
1858
1859         btrfs_init_workers(&fs_info->workers, "worker",
1860                            fs_info->thread_pool_size,
1861                            &fs_info->generic_worker);
1862
1863         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1864                            fs_info->thread_pool_size,
1865                            &fs_info->generic_worker);
1866
1867         btrfs_init_workers(&fs_info->submit_workers, "submit",
1868                            min_t(u64, fs_devices->num_devices,
1869                            fs_info->thread_pool_size),
1870                            &fs_info->generic_worker);
1871
1872         btrfs_init_workers(&fs_info->caching_workers, "cache",
1873                            2, &fs_info->generic_worker);
1874
1875         /* a higher idle thresh on the submit workers makes it much more
1876          * likely that bios will be sent down in a sane order to the
1877          * devices
1878          */
1879         fs_info->submit_workers.idle_thresh = 64;
1880
1881         fs_info->workers.idle_thresh = 16;
1882         fs_info->workers.ordered = 1;
1883
1884         fs_info->delalloc_workers.idle_thresh = 2;
1885         fs_info->delalloc_workers.ordered = 1;
1886
1887         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
1888                            &fs_info->generic_worker);
1889         btrfs_init_workers(&fs_info->endio_workers, "endio",
1890                            fs_info->thread_pool_size,
1891                            &fs_info->generic_worker);
1892         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1893                            fs_info->thread_pool_size,
1894                            &fs_info->generic_worker);
1895         btrfs_init_workers(&fs_info->endio_meta_write_workers,
1896                            "endio-meta-write", fs_info->thread_pool_size,
1897                            &fs_info->generic_worker);
1898         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1899                            fs_info->thread_pool_size,
1900                            &fs_info->generic_worker);
1901         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
1902                            1, &fs_info->generic_worker);
1903         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
1904                            fs_info->thread_pool_size,
1905                            &fs_info->generic_worker);
1906
1907         /*
1908          * endios are largely parallel and should have a very
1909          * low idle thresh
1910          */
1911         fs_info->endio_workers.idle_thresh = 4;
1912         fs_info->endio_meta_workers.idle_thresh = 4;
1913
1914         fs_info->endio_write_workers.idle_thresh = 2;
1915         fs_info->endio_meta_write_workers.idle_thresh = 2;
1916
1917         btrfs_start_workers(&fs_info->workers, 1);
1918         btrfs_start_workers(&fs_info->generic_worker, 1);
1919         btrfs_start_workers(&fs_info->submit_workers, 1);
1920         btrfs_start_workers(&fs_info->delalloc_workers, 1);
1921         btrfs_start_workers(&fs_info->fixup_workers, 1);
1922         btrfs_start_workers(&fs_info->endio_workers, 1);
1923         btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1924         btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1925         btrfs_start_workers(&fs_info->endio_write_workers, 1);
1926         btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
1927         btrfs_start_workers(&fs_info->delayed_workers, 1);
1928         btrfs_start_workers(&fs_info->caching_workers, 1);
1929
1930         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1931         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1932                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1933
1934         nodesize = btrfs_super_nodesize(disk_super);
1935         leafsize = btrfs_super_leafsize(disk_super);
1936         sectorsize = btrfs_super_sectorsize(disk_super);
1937         stripesize = btrfs_super_stripesize(disk_super);
1938         tree_root->nodesize = nodesize;
1939         tree_root->leafsize = leafsize;
1940         tree_root->sectorsize = sectorsize;
1941         tree_root->stripesize = stripesize;
1942
1943         sb->s_blocksize = sectorsize;
1944         sb->s_blocksize_bits = blksize_bits(sectorsize);
1945
1946         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1947                     sizeof(disk_super->magic))) {
1948                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1949                 goto fail_sb_buffer;
1950         }
1951
1952         mutex_lock(&fs_info->chunk_mutex);
1953         ret = btrfs_read_sys_array(tree_root);
1954         mutex_unlock(&fs_info->chunk_mutex);
1955         if (ret) {
1956                 printk(KERN_WARNING "btrfs: failed to read the system "
1957                        "array on %s\n", sb->s_id);
1958                 goto fail_sb_buffer;
1959         }
1960
1961         blocksize = btrfs_level_size(tree_root,
1962                                      btrfs_super_chunk_root_level(disk_super));
1963         generation = btrfs_super_chunk_root_generation(disk_super);
1964
1965         __setup_root(nodesize, leafsize, sectorsize, stripesize,
1966                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1967
1968         chunk_root->node = read_tree_block(chunk_root,
1969                                            btrfs_super_chunk_root(disk_super),
1970                                            blocksize, generation);
1971         BUG_ON(!chunk_root->node);
1972         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
1973                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
1974                        sb->s_id);
1975                 goto fail_chunk_root;
1976         }
1977         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
1978         chunk_root->commit_root = btrfs_root_node(chunk_root);
1979
1980         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1981            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1982            BTRFS_UUID_SIZE);
1983
1984         mutex_lock(&fs_info->chunk_mutex);
1985         ret = btrfs_read_chunk_tree(chunk_root);
1986         mutex_unlock(&fs_info->chunk_mutex);
1987         if (ret) {
1988                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1989                        sb->s_id);
1990                 goto fail_chunk_root;
1991         }
1992
1993         btrfs_close_extra_devices(fs_devices);
1994
1995         blocksize = btrfs_level_size(tree_root,
1996                                      btrfs_super_root_level(disk_super));
1997         generation = btrfs_super_generation(disk_super);
1998
1999         tree_root->node = read_tree_block(tree_root,
2000                                           btrfs_super_root(disk_super),
2001                                           blocksize, generation);
2002         if (!tree_root->node)
2003                 goto fail_chunk_root;
2004         if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2005                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2006                        sb->s_id);
2007                 goto fail_tree_root;
2008         }
2009         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2010         tree_root->commit_root = btrfs_root_node(tree_root);
2011
2012         ret = find_and_setup_root(tree_root, fs_info,
2013                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2014         if (ret)
2015                 goto fail_tree_root;
2016         extent_root->track_dirty = 1;
2017
2018         ret = find_and_setup_root(tree_root, fs_info,
2019                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2020         if (ret)
2021                 goto fail_extent_root;
2022         dev_root->track_dirty = 1;
2023
2024         ret = find_and_setup_root(tree_root, fs_info,
2025                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2026         if (ret)
2027                 goto fail_dev_root;
2028
2029         csum_root->track_dirty = 1;
2030
2031         fs_info->generation = generation;
2032         fs_info->last_trans_committed = generation;
2033         fs_info->data_alloc_profile = (u64)-1;
2034         fs_info->metadata_alloc_profile = (u64)-1;
2035         fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
2036
2037         ret = btrfs_init_space_info(fs_info);
2038         if (ret) {
2039                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2040                 goto fail_block_groups;
2041         }
2042
2043         ret = btrfs_read_block_groups(extent_root);
2044         if (ret) {
2045                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2046                 goto fail_block_groups;
2047         }
2048
2049         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2050                                                "btrfs-cleaner");
2051         if (IS_ERR(fs_info->cleaner_kthread))
2052                 goto fail_block_groups;
2053
2054         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2055                                                    tree_root,
2056                                                    "btrfs-transaction");
2057         if (IS_ERR(fs_info->transaction_kthread))
2058                 goto fail_cleaner;
2059
2060         if (!btrfs_test_opt(tree_root, SSD) &&
2061             !btrfs_test_opt(tree_root, NOSSD) &&
2062             !fs_info->fs_devices->rotating) {
2063                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2064                        "mode\n");
2065                 btrfs_set_opt(fs_info->mount_opt, SSD);
2066         }
2067
2068         /* do not make disk changes in broken FS */
2069         if (btrfs_super_log_root(disk_super) != 0 &&
2070             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2071                 u64 bytenr = btrfs_super_log_root(disk_super);
2072
2073                 if (fs_devices->rw_devices == 0) {
2074                         printk(KERN_WARNING "Btrfs log replay required "
2075                                "on RO media\n");
2076                         err = -EIO;
2077                         goto fail_trans_kthread;
2078                 }
2079                 blocksize =
2080                      btrfs_level_size(tree_root,
2081                                       btrfs_super_log_root_level(disk_super));
2082
2083                 log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
2084                 if (!log_tree_root) {
2085                         err = -ENOMEM;
2086                         goto fail_trans_kthread;
2087                 }
2088
2089                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2090                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2091
2092                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2093                                                       blocksize,
2094                                                       generation + 1);
2095                 ret = btrfs_recover_log_trees(log_tree_root);
2096                 BUG_ON(ret);
2097
2098                 if (sb->s_flags & MS_RDONLY) {
2099                         ret =  btrfs_commit_super(tree_root);
2100                         BUG_ON(ret);
2101                 }
2102         }
2103
2104         ret = btrfs_find_orphan_roots(tree_root);
2105         BUG_ON(ret);
2106
2107         if (!(sb->s_flags & MS_RDONLY)) {
2108                 ret = btrfs_cleanup_fs_roots(fs_info);
2109                 BUG_ON(ret);
2110
2111                 ret = btrfs_recover_relocation(tree_root);
2112                 if (ret < 0) {
2113                         printk(KERN_WARNING
2114                                "btrfs: failed to recover relocation\n");
2115                         err = -EINVAL;
2116                         goto fail_trans_kthread;
2117                 }
2118         }
2119
2120         location.objectid = BTRFS_FS_TREE_OBJECTID;
2121         location.type = BTRFS_ROOT_ITEM_KEY;
2122         location.offset = (u64)-1;
2123
2124         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2125         if (!fs_info->fs_root)
2126                 goto fail_trans_kthread;
2127         if (IS_ERR(fs_info->fs_root)) {
2128                 err = PTR_ERR(fs_info->fs_root);
2129                 goto fail_trans_kthread;
2130         }
2131
2132         if (!(sb->s_flags & MS_RDONLY)) {
2133                 down_read(&fs_info->cleanup_work_sem);
2134                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2135                 if (!err)
2136                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2137                 up_read(&fs_info->cleanup_work_sem);
2138                 if (err) {
2139                         close_ctree(tree_root);
2140                         return ERR_PTR(err);
2141                 }
2142         }
2143
2144         return tree_root;
2145
2146 fail_trans_kthread:
2147         kthread_stop(fs_info->transaction_kthread);
2148 fail_cleaner:
2149         kthread_stop(fs_info->cleaner_kthread);
2150
2151         /*
2152          * make sure we're done with the btree inode before we stop our
2153          * kthreads
2154          */
2155         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2156         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2157
2158 fail_block_groups:
2159         btrfs_free_block_groups(fs_info);
2160         free_extent_buffer(csum_root->node);
2161         free_extent_buffer(csum_root->commit_root);
2162 fail_dev_root:
2163         free_extent_buffer(dev_root->node);
2164         free_extent_buffer(dev_root->commit_root);
2165 fail_extent_root:
2166         free_extent_buffer(extent_root->node);
2167         free_extent_buffer(extent_root->commit_root);
2168 fail_tree_root:
2169         free_extent_buffer(tree_root->node);
2170         free_extent_buffer(tree_root->commit_root);
2171 fail_chunk_root:
2172         free_extent_buffer(chunk_root->node);
2173         free_extent_buffer(chunk_root->commit_root);
2174 fail_sb_buffer:
2175         btrfs_stop_workers(&fs_info->generic_worker);
2176         btrfs_stop_workers(&fs_info->fixup_workers);
2177         btrfs_stop_workers(&fs_info->delalloc_workers);
2178         btrfs_stop_workers(&fs_info->workers);
2179         btrfs_stop_workers(&fs_info->endio_workers);
2180         btrfs_stop_workers(&fs_info->endio_meta_workers);
2181         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2182         btrfs_stop_workers(&fs_info->endio_write_workers);
2183         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2184         btrfs_stop_workers(&fs_info->submit_workers);
2185         btrfs_stop_workers(&fs_info->delayed_workers);
2186         btrfs_stop_workers(&fs_info->caching_workers);
2187 fail_alloc:
2188         kfree(fs_info->delayed_root);
2189 fail_iput:
2190         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2191         iput(fs_info->btree_inode);
2192
2193         btrfs_close_devices(fs_info->fs_devices);
2194         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2195 fail_bdi:
2196         bdi_destroy(&fs_info->bdi);
2197 fail_srcu:
2198         cleanup_srcu_struct(&fs_info->subvol_srcu);
2199 fail:
2200         kfree(extent_root);
2201         kfree(tree_root);
2202         kfree(fs_info);
2203         kfree(chunk_root);
2204         kfree(dev_root);
2205         kfree(csum_root);
2206         return ERR_PTR(err);
2207 }
2208
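/*
 * end_io handler for the super block buffer heads submitted by
 * write_dev_supers: mark the buffer uptodate on success, otherwise print a
 * ratelimited warning and clear the uptodate bit so the waiting pass sees
 * the failure.
 */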
2209 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2210 {
2211         char b[BDEVNAME_SIZE];
2212
2213         if (uptodate) {
2214                 set_buffer_uptodate(bh);
2215         } else {
2216                 printk_ratelimited(KERN_WARNING "lost page write due to "
2217                                         "I/O error on %s\n",
2218                                        bdevname(bh->b_bdev, b));
2219                 /* note, we don't set_buffer_write_io_error because we have
2220                  * our own ways of dealing with the IO errors
2221                  */
2222                 clear_buffer_uptodate(bh);
2223         }
2224         unlock_buffer(bh);
2225         put_bh(bh);
2226 }
2227
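/*
 * Read the super block copies from a device and return the buffer head
 * holding the copy with the highest generation.  Only the first mirror is
 * scanned for now, for the reason given in the comment below.
 */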
2228 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2229 {
2230         struct buffer_head *bh;
2231         struct buffer_head *latest = NULL;
2232         struct btrfs_super_block *super;
2233         int i;
2234         u64 transid = 0;
2235         u64 bytenr;
2236
2237         /* we would like to check all the supers, but that would make
2238          * a btrfs mount succeed after a mkfs from a different FS.
2239          * So, we need to add a special mount option to scan for
2240          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
2241          */
2242         for (i = 0; i < 1; i++) {
2243                 bytenr = btrfs_sb_offset(i);
2244                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2245                         break;
2246                 bh = __bread(bdev, bytenr / 4096, 4096);
2247                 if (!bh)
2248                         continue;
2249
2250                 super = (struct btrfs_super_block *)bh->b_data;
2251                 if (btrfs_super_bytenr(super) != bytenr ||
2252                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2253                             sizeof(super->magic))) {
2254                         brelse(bh);
2255                         continue;
2256                 }
2257
2258                 if (!latest || btrfs_super_generation(super) > transid) {
2259                         brelse(latest);
2260                         latest = bh;
2261                         transid = btrfs_super_generation(super);
2262                 } else {
2263                         brelse(bh);
2264                 }
2265         }
2266         return latest;
2267 }
2268
2269 /*
2270  * this should be called twice, once with wait == 0 and
2271  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2272  * we write are pinned.
2273  *
2274  * They are released when wait == 1 is done.
2275  * max_mirrors must be the same for both runs, and it indicates how
2276  * many supers on this one device should be written.
2277  *
2278  * max_mirrors == 0 means to write them all.
2279  */
2280 static int write_dev_supers(struct btrfs_device *device,
2281                             struct btrfs_super_block *sb,
2282                             int do_barriers, int wait, int max_mirrors)
2283 {
2284         struct buffer_head *bh;
2285         int i;
2286         int ret;
2287         int errors = 0;
2288         u32 crc;
2289         u64 bytenr;
2290         int last_barrier = 0;
2291
2292         if (max_mirrors == 0)
2293                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2294
2295         /* make sure only the last submit_bh does a barrier */
2296         if (do_barriers) {
2297                 for (i = 0; i < max_mirrors; i++) {
2298                         bytenr = btrfs_sb_offset(i);
2299                         if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2300                             device->total_bytes)
2301                                 break;
2302                         last_barrier = i;
2303                 }
2304         }
2305
2306         for (i = 0; i < max_mirrors; i++) {
2307                 bytenr = btrfs_sb_offset(i);
2308                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2309                         break;
2310
2311                 if (wait) {
2312                         bh = __find_get_block(device->bdev, bytenr / 4096,
2313                                               BTRFS_SUPER_INFO_SIZE);
2314                         BUG_ON(!bh);
2315                         wait_on_buffer(bh);
2316                         if (!buffer_uptodate(bh))
2317                                 errors++;
2318
2319                         /* drop our reference */
2320                         brelse(bh);
2321
2322                         /* drop the reference from the wait == 0 run */
2323                         brelse(bh);
2324                         continue;
2325                 } else {
2326                         btrfs_set_super_bytenr(sb, bytenr);
2327
2328                         crc = ~(u32)0;
2329                         crc = btrfs_csum_data(NULL, (char *)sb +
2330                                               BTRFS_CSUM_SIZE, crc,
2331                                               BTRFS_SUPER_INFO_SIZE -
2332                                               BTRFS_CSUM_SIZE);
2333                         btrfs_csum_final(crc, sb->csum);
2334
2335                         /*
2336                          * one reference for us, and we leave it for the
2337                          * caller
2338                          */
2339                         bh = __getblk(device->bdev, bytenr / 4096,
2340                                       BTRFS_SUPER_INFO_SIZE);
2341                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2342
2343                         /* one reference for submit_bh */
2344                         get_bh(bh);
2345
2346                         set_buffer_uptodate(bh);
2347                         lock_buffer(bh);
2348                         bh->b_end_io = btrfs_end_buffer_write_sync;
2349                 }
2350
2351                 if (i == last_barrier && do_barriers)
2352                         ret = submit_bh(WRITE_FLUSH_FUA, bh);
2353                 else
2354                         ret = submit_bh(WRITE_SYNC, bh);
2355
2356                 if (ret)
2357                         errors++;
2358         }
2359         return errors < i ? 0 : -1;
2360 }
2361
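/*
 * write_all_supers - write the super block to every writeable device
 *
 * The copy in super_for_commit is stamped with each device's dev_item and
 * handed to write_dev_supers() in the two passes described above
 * (illustrative, mirroring the calls in the function body):
 *
 *      write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);   submit
 *      ...
 *      write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);   wait
 *
 * If more devices fail than the filesystem can tolerate (max_errors), the
 * commit cannot complete safely and we BUG().
 */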
2362 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2363 {
2364         struct list_head *head;
2365         struct btrfs_device *dev;
2366         struct btrfs_super_block *sb;
2367         struct btrfs_dev_item *dev_item;
2368         int ret;
2369         int do_barriers;
2370         int max_errors;
2371         int total_errors = 0;
2372         u64 flags;
2373
2374         max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2375         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2376
2377         sb = &root->fs_info->super_for_commit;
2378         dev_item = &sb->dev_item;
2379
2380         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2381         head = &root->fs_info->fs_devices->devices;
2382         list_for_each_entry_rcu(dev, head, dev_list) {
2383                 if (!dev->bdev) {
2384                         total_errors++;
2385                         continue;
2386                 }
2387                 if (!dev->in_fs_metadata || !dev->writeable)
2388                         continue;
2389
2390                 btrfs_set_stack_device_generation(dev_item, 0);
2391                 btrfs_set_stack_device_type(dev_item, dev->type);
2392                 btrfs_set_stack_device_id(dev_item, dev->devid);
2393                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2394                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2395                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2396                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2397                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2398                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2399                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2400
2401                 flags = btrfs_super_flags(sb);
2402                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2403
2404                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2405                 if (ret)
2406                         total_errors++;
2407         }
2408         if (total_errors > max_errors) {
2409                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2410                        total_errors);
2411                 BUG();
2412         }
2413
2414         total_errors = 0;
2415         list_for_each_entry_rcu(dev, head, dev_list) {
2416                 if (!dev->bdev)
2417                         continue;
2418                 if (!dev->in_fs_metadata || !dev->writeable)
2419                         continue;
2420
2421                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2422                 if (ret)
2423                         total_errors++;
2424         }
2425         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2426         if (total_errors > max_errors) {
2427                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2428                        total_errors);
2429                 BUG();
2430         }
2431         return 0;
2432 }
2433
2434 int write_ctree_super(struct btrfs_trans_handle *trans,
2435                       struct btrfs_root *root, int max_mirrors)
2436 {
2437         int ret;
2438
2439         ret = write_all_supers(root, max_mirrors);
2440         return ret;
2441 }
2442
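/*
 * Drop a subvolume root from the fs_roots radix tree and free it.  If the
 * root has no references left, wait for any subvol_srcu readers to finish
 * before the memory goes away.
 */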
2443 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2444 {
2445         spin_lock(&fs_info->fs_roots_radix_lock);
2446         radix_tree_delete(&fs_info->fs_roots_radix,
2447                           (unsigned long)root->root_key.objectid);
2448         spin_unlock(&fs_info->fs_roots_radix_lock);
2449
2450         if (btrfs_root_refs(&root->root_item) == 0)
2451                 synchronize_srcu(&fs_info->subvol_srcu);
2452
2453         __btrfs_remove_free_space_cache(root->free_ino_pinned);
2454         __btrfs_remove_free_space_cache(root->free_ino_ctl);
2455         free_fs_root(root);
2456         return 0;
2457 }
2458
2459 static void free_fs_root(struct btrfs_root *root)
2460 {
2461         iput(root->cache_inode);
2462         WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
2463         if (root->anon_dev)
2464                 free_anon_bdev(root->anon_dev);
2465         free_extent_buffer(root->node);
2466         free_extent_buffer(root->commit_root);
2467         kfree(root->free_ino_ctl);
2468         kfree(root->free_ino_pinned);
2469         kfree(root->name);
2470         kfree(root);
2471 }
2472
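/*
 * Free everything left on the dead_roots list and every root still in the
 * fs_roots radix tree; used by close_ctree during unmount.
 */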
2473 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2474 {
2475         int ret;
2476         struct btrfs_root *gang[8];
2477         int i;
2478
2479         while (!list_empty(&fs_info->dead_roots)) {
2480                 gang[0] = list_entry(fs_info->dead_roots.next,
2481                                      struct btrfs_root, root_list);
2482                 list_del(&gang[0]->root_list);
2483
2484                 if (gang[0]->in_radix) {
2485                         btrfs_free_fs_root(fs_info, gang[0]);
2486                 } else {
2487                         free_extent_buffer(gang[0]->node);
2488                         free_extent_buffer(gang[0]->commit_root);
2489                         kfree(gang[0]);
2490                 }
2491         }
2492
2493         while (1) {
2494                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2495                                              (void **)gang, 0,
2496                                              ARRAY_SIZE(gang));
2497                 if (!ret)
2498                         break;
2499                 for (i = 0; i < ret; i++)
2500                         btrfs_free_fs_root(fs_info, gang[i]);
2501         }
2502         return 0;
2503 }
2504
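/*
 * Walk all fs roots currently in the radix tree and run orphan cleanup on
 * each of them; the first error stops the walk and is returned.
 */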
2505 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2506 {
2507         u64 root_objectid = 0;
2508         struct btrfs_root *gang[8];
2509         int i;
2510         int ret;
2511
2512         while (1) {
2513                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2514                                              (void **)gang, root_objectid,
2515                                              ARRAY_SIZE(gang));
2516                 if (!ret)
2517                         break;
2518
2519                 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2520                 for (i = 0; i < ret; i++) {
2521                         int err;
2522
2523                         root_objectid = gang[i]->root_key.objectid;
2524                         err = btrfs_orphan_cleanup(gang[i]);
2525                         if (err)
2526                                 return err;
2527                 }
2528                 root_objectid++;
2529         }
2530         return 0;
2531 }
2532
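/*
 * Flush outstanding cleaner work, commit the running transaction twice so
 * dropped snapshots make it to disk as well, and then write the super
 * blocks.  Used on clean unmount and after read-only log replay.
 */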
2533 int btrfs_commit_super(struct btrfs_root *root)
2534 {
2535         struct btrfs_trans_handle *trans;
2536         int ret;
2537
2538         mutex_lock(&root->fs_info->cleaner_mutex);
2539         btrfs_run_delayed_iputs(root);
2540         btrfs_clean_old_snapshots(root);
2541         mutex_unlock(&root->fs_info->cleaner_mutex);
2542
2543         /* wait until ongoing cleanup work is done */
2544         down_write(&root->fs_info->cleanup_work_sem);
2545         up_write(&root->fs_info->cleanup_work_sem);
2546
2547         trans = btrfs_join_transaction(root);
2548         if (IS_ERR(trans))
2549                 return PTR_ERR(trans);
2550         ret = btrfs_commit_transaction(trans, root);
2551         BUG_ON(ret);
2552         /* run commit again to drop the original snapshot */
2553         trans = btrfs_join_transaction(root);
2554         if (IS_ERR(trans))
2555                 return PTR_ERR(trans);
2556         btrfs_commit_transaction(trans, root);
2557         ret = btrfs_write_and_wait_transaction(NULL, root);
2558         BUG_ON(ret);
2559
2560         ret = write_ctree_super(NULL, root, 0);
2561         return ret;
2562 }
2563
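/*
 * close_ctree - unmount-time teardown
 *
 * Cancels scrub, waits for running defrag, commits the final transaction
 * (or the error-state variant for a broken filesystem), stops the kthreads
 * and worker pools, and frees the block groups, roots, btree inode and
 * devices.
 */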
2564 int close_ctree(struct btrfs_root *root)
2565 {
2566         struct btrfs_fs_info *fs_info = root->fs_info;
2567         int ret;
2568
2569         fs_info->closing = 1;
2570         smp_mb();
2571
2572         btrfs_scrub_cancel(root);
2573
2574         /* wait for any defraggers to finish */
2575         wait_event(fs_info->transaction_wait,
2576                    (atomic_read(&fs_info->defrag_running) == 0));
2577
2578         /* clear out the rbtree of defraggable inodes */
2579         btrfs_run_defrag_inodes(root->fs_info);
2580
2581         btrfs_put_block_group_cache(fs_info);
2582
2583         /*
2584          * There are two situations in which btrfs is broken and gets flipped
2585          * read-only:
2586          *
2587          * 1. btrfs flips read-only somewhere else before btrfs_commit_super;
2588          * sb->s_flags then has the MS_RDONLY flag set, and btrfs skips writing
2589          * the sb directly so that the ERROR state is kept on disk.
2590          *
2591          * 2. btrfs flips read-only inside btrfs_commit_super itself; in that
2592          * case btrfs cannot write the sb via btrfs_commit_super, and since
2593          * fs_state has the BTRFS_SUPER_FLAG_ERROR flag set, btrfs cleans up
2594          * all FS resources first and writes the sb afterwards.
2595          */
2596         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2597                 ret = btrfs_commit_super(root);
2598                 if (ret)
2599                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2600         }
2601
2602         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
2603                 ret = btrfs_error_commit_super(root);
2604                 if (ret)
2605                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2606         }
2607
2608         kthread_stop(root->fs_info->transaction_kthread);
2609         kthread_stop(root->fs_info->cleaner_kthread);
2610
2611         fs_info->closing = 2;
2612         smp_mb();
2613
2614         if (fs_info->delalloc_bytes) {
2615                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2616                        (unsigned long long)fs_info->delalloc_bytes);
2617         }
2618         if (fs_info->total_ref_cache_size) {
2619                 printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
2620                        (unsigned long long)fs_info->total_ref_cache_size);
2621         }
2622
2623         free_extent_buffer(fs_info->extent_root->node);
2624         free_extent_buffer(fs_info->extent_root->commit_root);
2625         free_extent_buffer(fs_info->tree_root->node);
2626         free_extent_buffer(fs_info->tree_root->commit_root);
2627         free_extent_buffer(root->fs_info->chunk_root->node);
2628         free_extent_buffer(root->fs_info->chunk_root->commit_root);
2629         free_extent_buffer(root->fs_info->dev_root->node);
2630         free_extent_buffer(root->fs_info->dev_root->commit_root);
2631         free_extent_buffer(root->fs_info->csum_root->node);
2632         free_extent_buffer(root->fs_info->csum_root->commit_root);
2633
2634         btrfs_free_block_groups(root->fs_info);
2635
2636         del_fs_roots(fs_info);
2637
2638         iput(fs_info->btree_inode);
2639         kfree(fs_info->delayed_root);
2640
2641         btrfs_stop_workers(&fs_info->generic_worker);
2642         btrfs_stop_workers(&fs_info->fixup_workers);
2643         btrfs_stop_workers(&fs_info->delalloc_workers);
2644         btrfs_stop_workers(&fs_info->workers);
2645         btrfs_stop_workers(&fs_info->endio_workers);
2646         btrfs_stop_workers(&fs_info->endio_meta_workers);
2647         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2648         btrfs_stop_workers(&fs_info->endio_write_workers);
2649         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2650         btrfs_stop_workers(&fs_info->submit_workers);
2651         btrfs_stop_workers(&fs_info->delayed_workers);
2652         btrfs_stop_workers(&fs_info->caching_workers);
2653
2654         btrfs_close_devices(fs_info->fs_devices);
2655         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2656
2657         bdi_destroy(&fs_info->bdi);
2658         cleanup_srcu_struct(&fs_info->subvol_srcu);
2659
2660         kfree(fs_info->extent_root);
2661         kfree(fs_info->tree_root);
2662         kfree(fs_info->chunk_root);
2663         kfree(fs_info->dev_root);
2664         kfree(fs_info->csum_root);
2665         kfree(fs_info);
2666
2667         return 0;
2668 }
2669
2670 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2671 {
2672         int ret;
2673         struct inode *btree_inode = buf->first_page->mapping->host;
2674
2675         ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
2676                                      NULL);
2677         if (!ret)
2678                 return ret;
2679
2680         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2681                                     parent_transid);
2682         return !ret;
2683 }
2684
2685 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2686 {
2687         struct inode *btree_inode = buf->first_page->mapping->host;
2688         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2689                                           buf);
2690 }
2691
2692 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2693 {
2694         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2695         u64 transid = btrfs_header_generation(buf);
2696         struct inode *btree_inode = root->fs_info->btree_inode;
2697         int was_dirty;
2698
2699         btrfs_assert_tree_locked(buf);
2700         if (transid != root->fs_info->generation) {
2701                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2702                        "found %llu running %llu\n",
2703                         (unsigned long long)buf->start,
2704                         (unsigned long long)transid,
2705                         (unsigned long long)root->fs_info->generation);
2706                 WARN_ON(1);
2707         }
2708         was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2709                                             buf);
2710         if (!was_dirty) {
2711                 spin_lock(&root->fs_info->delalloc_lock);
2712                 root->fs_info->dirty_metadata_bytes += buf->len;
2713                 spin_unlock(&root->fs_info->delalloc_lock);
2714         }
2715 }
2716
2717 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2718 {
2719         /*
2720          * looks as though older kernels can get into trouble with
2721          * this code; they end up stuck in balance_dirty_pages forever
2722          */
2723         u64 num_dirty;
2724         unsigned long thresh = 32 * 1024 * 1024;
2725
2726         if (current->flags & PF_MEMALLOC)
2727                 return;
2728
2729         btrfs_balance_delayed_items(root);
2730
2731         num_dirty = root->fs_info->dirty_metadata_bytes;
2732
2733         if (num_dirty > thresh) {
2734                 balance_dirty_pages_ratelimited_nr(
2735                                    root->fs_info->btree_inode->i_mapping, 1);
2736         }
2737         return;
2738 }
2739
2740 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2741 {
2742         /*
2743          * looks as though older kernels can get into trouble with
2744          * this code; they end up stuck in balance_dirty_pages forever
2745          */
2746         u64 num_dirty;
2747         unsigned long thresh = 32 * 1024 * 1024;
2748
2749         if (current->flags & PF_MEMALLOC)
2750                 return;
2751
2752         num_dirty = root->fs_info->dirty_metadata_bytes;
2753
2754         if (num_dirty > thresh) {
2755                 balance_dirty_pages_ratelimited_nr(
2756                                    root->fs_info->btree_inode->i_mapping, 1);
2757         }
2758         return;
2759 }
2760
2761 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2762 {
2763         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2764         int ret;
2765         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2766         if (ret == 0)
2767                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2768         return ret;
2769 }
2770
2771 int btree_lock_page_hook(struct page *page)
2772 {
2773         struct inode *inode = page->mapping->host;
2774         struct btrfs_root *root = BTRFS_I(inode)->root;
2775         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2776         struct extent_buffer *eb;
2777         unsigned long len;
2778         u64 bytenr = page_offset(page);
2779
2780         if (page->private == EXTENT_PAGE_PRIVATE)
2781                 goto out;
2782
2783         len = page->private >> 2;
2784         eb = find_extent_buffer(io_tree, bytenr, len);
2785         if (!eb)
2786                 goto out;
2787
2788         btrfs_tree_lock(eb);
2789         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2790
2791         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2792                 spin_lock(&root->fs_info->delalloc_lock);
2793                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
2794                         root->fs_info->dirty_metadata_bytes -= eb->len;
2795                 else
2796                         WARN_ON(1);
2797                 spin_unlock(&root->fs_info->delalloc_lock);
2798         }
2799
2800         btrfs_tree_unlock(eb);
2801         free_extent_buffer(eb);
2802 out:
2803         lock_page(page);
2804         return 0;
2805 }
2806
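     /*
      * On a read-write mount, warn if the filesystem was previously
      * marked with an error so the admin knows to run btrfsck.
      */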
2807 static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
2808                               int read_only)
2809 {
2810         if (read_only)
2811                 return;
2812
2813         if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2814                 printk(KERN_WARNING "warning: mounting fs with errors, "
2815                        "running btrfsck is recommended\n");
2816 }
2817
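     /*
      * Commit the super after the filesystem has hit a fatal error: run
      * the delayed iputs, wait for cleanup work to drain, tear down any
      * running transactions and then write the super block.
      */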
2818 int btrfs_error_commit_super(struct btrfs_root *root)
2819 {
2820         int ret;
2821
2822         mutex_lock(&root->fs_info->cleaner_mutex);
2823         btrfs_run_delayed_iputs(root);
2824         mutex_unlock(&root->fs_info->cleaner_mutex);
2825
2826         down_write(&root->fs_info->cleanup_work_sem);
2827         up_write(&root->fs_info->cleanup_work_sem);
2828
2829         /* cleanup FS via transaction */
2830         btrfs_cleanup_transaction(root);
2831
2832         ret = write_ctree_super(NULL, root, 0);
2833
2834         return ret;
2835 }
2836
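     /*
      * Error-path teardown: empty the ordered_operations list and
      * invalidate the inodes that were queued on it.
      */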
2837 static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
2838 {
2839         struct btrfs_inode *btrfs_inode;
2840         struct list_head splice;
2841
2842         INIT_LIST_HEAD(&splice);
2843
2844         mutex_lock(&root->fs_info->ordered_operations_mutex);
2845         spin_lock(&root->fs_info->ordered_extent_lock);
2846
2847         list_splice_init(&root->fs_info->ordered_operations, &splice);
2848         while (!list_empty(&splice)) {
2849                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2850                                          ordered_operations);
2851
2852                 list_del_init(&btrfs_inode->ordered_operations);
2853
2854                 btrfs_invalidate_inodes(btrfs_inode->root);
2855         }
2856
2857         spin_unlock(&root->fs_info->ordered_extent_lock);
2858         mutex_unlock(&root->fs_info->ordered_operations_mutex);
2859
2860         return 0;
2861 }
2862
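     /*
      * Error-path teardown: for every ordered extent still on the per-fs
      * list, take and release a reference on the inode (it may already be
      * going away) and force the ordered extent's refcount to 1 so the
      * final put frees it.
      */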
2863 static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
2864 {
2865         struct list_head splice;
2866         struct btrfs_ordered_extent *ordered;
2867         struct inode *inode;
2868
2869         INIT_LIST_HEAD(&splice);
2870
2871         spin_lock(&root->fs_info->ordered_extent_lock);
2872
2873         list_splice_init(&root->fs_info->ordered_extents, &splice);
2874         while (!list_empty(&splice)) {
2875                 ordered = list_entry(splice.next, struct btrfs_ordered_extent,
2876                                      root_extent_list);
2877
2878                 list_del_init(&ordered->root_extent_list);
2879                 atomic_inc(&ordered->refs);
2880
2881                 /* the inode may be getting freed (in sys_unlink path). */
2882                 inode = igrab(ordered->inode);
2883
2884                 spin_unlock(&root->fs_info->ordered_extent_lock);
2885                 if (inode)
2886                         iput(inode);
2887
2888                 atomic_set(&ordered->refs, 1);
2889                 btrfs_put_ordered_extent(ordered);
2890
2891                 spin_lock(&root->fs_info->ordered_extent_lock);
2892         }
2893
2894         spin_unlock(&root->fs_info->ordered_extent_lock);
2895
2896         return 0;
2897 }
2898
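     /*
      * Error-path teardown: empty the delayed ref rbtree.  Head refs also
      * get their extent_op freed and are pulled off their cluster before
      * the final btrfs_put_delayed_ref().
      */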
2899 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2900                                       struct btrfs_root *root)
2901 {
2902         struct rb_node *node;
2903         struct btrfs_delayed_ref_root *delayed_refs;
2904         struct btrfs_delayed_ref_node *ref;
2905         int ret = 0;
2906
2907         delayed_refs = &trans->delayed_refs;
2908
2909         spin_lock(&delayed_refs->lock);
2910         if (delayed_refs->num_entries == 0) {
2911                 spin_unlock(&delayed_refs->lock);
2912                 printk(KERN_INFO "delayed_refs has no entries\n");
2913                 return ret;
2914         }
2915
2916         node = rb_first(&delayed_refs->root);
2917         while (node) {
2918                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2919                 node = rb_next(node);
2920
2921                 ref->in_tree = 0;
2922                 rb_erase(&ref->rb_node, &delayed_refs->root);
2923                 delayed_refs->num_entries--;
2924
2925                 atomic_set(&ref->refs, 1);
2926                 if (btrfs_delayed_ref_is_head(ref)) {
2927                         struct btrfs_delayed_ref_head *head;
2928
2929                         head = btrfs_delayed_node_to_head(ref);
2930                         mutex_lock(&head->mutex);
2931                         kfree(head->extent_op);
2932                         delayed_refs->num_heads--;
2933                         if (list_empty(&head->cluster))
2934                                 delayed_refs->num_heads_ready--;
2935                         list_del_init(&head->cluster);
2936                         mutex_unlock(&head->mutex);
2937                 }
2938
2939                 spin_unlock(&delayed_refs->lock);
2940                 btrfs_put_delayed_ref(ref);
2941
2942                 cond_resched();
2943                 spin_lock(&delayed_refs->lock);
2944         }
2945
2946         spin_unlock(&delayed_refs->lock);
2947
2948         return ret;
2949 }
2950
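     /*
      * Error-path teardown: free the pending snapshots queued on this
      * transaction without creating them.
      */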
2951 static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
2952 {
2953         struct btrfs_pending_snapshot *snapshot;
2954         struct list_head splice;
2955
2956         INIT_LIST_HEAD(&splice);
2957
2958         list_splice_init(&t->pending_snapshots, &splice);
2959
2960         while (!list_empty(&splice)) {
2961                 snapshot = list_entry(splice.next,
2962                                       struct btrfs_pending_snapshot,
2963                                       list);
2964
2965                 list_del_init(&snapshot->list);
2966
2967                 kfree(snapshot);
2968         }
2969
2970         return 0;
2971 }
2972
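     /*
      * Error-path teardown: empty the delalloc inode list and invalidate
      * the inodes that still had dirty data queued.
      */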
2973 static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2974 {
2975         struct btrfs_inode *btrfs_inode;
2976         struct list_head splice;
2977
2978         INIT_LIST_HEAD(&splice);
2979
2980         spin_lock(&root->fs_info->delalloc_lock);
2981         list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2982
2983         while (!list_empty(&splice)) {
2984                 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
2985                                     delalloc_inodes);
2986
2987                 list_del_init(&btrfs_inode->delalloc_inodes);
2988
2989                 btrfs_invalidate_inodes(btrfs_inode->root);
2990         }
2991
2992         spin_unlock(&root->fs_info->delalloc_lock);
2993
2994         return 0;
2995 }
2996
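     /*
      * Error-path teardown: clear 'mark' from every range in dirty_pages,
      * clear the dirty state of the matching extent buffers and btree
      * pages and invalidate those pages so they are never written back.
      */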
2997 static int btrfs_destroy_marked_extents(struct btrfs_root *root,
2998                                         struct extent_io_tree *dirty_pages,
2999                                         int mark)
3000 {
3001         int ret;
3002         struct page *page;
3003         struct inode *btree_inode = root->fs_info->btree_inode;
3004         struct extent_buffer *eb;
3005         u64 start = 0;
3006         u64 end;
3007         u64 offset;
3008         unsigned long index;
3009
3010         while (1) {
3011                 ret = find_first_extent_bit(dirty_pages, start, &start, &end,
3012                                             mark);
3013                 if (ret)
3014                         break;
3015
3016                 clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
3017                 while (start <= end) {
3018                         index = start >> PAGE_CACHE_SHIFT;
3019                         start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
3020                         page = find_get_page(btree_inode->i_mapping, index);
3021                         if (!page)
3022                                 continue;
3023                         offset = page_offset(page);
3024
3025                         spin_lock(&dirty_pages->buffer_lock);
3026                         eb = radix_tree_lookup(
3027                              &BTRFS_I(page->mapping->host)->io_tree.buffer,
3028                              offset >> PAGE_CACHE_SHIFT);
3029                         spin_unlock(&dirty_pages->buffer_lock);
3030                         if (eb) {
3031                                 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3032                                                          &eb->bflags);
3033                                 atomic_set(&eb->refs, 1);
3034                         }
3035                         if (PageWriteback(page))
3036                                 end_page_writeback(page);
3037
3038                         lock_page(page);
3039                         if (PageDirty(page)) {
3040                                 clear_page_dirty_for_io(page);
3041                                 spin_lock_irq(&page->mapping->tree_lock);
3042                                 radix_tree_tag_clear(&page->mapping->page_tree,
3043                                                         page_index(page),
3044                                                         PAGECACHE_TAG_DIRTY);
3045                                 spin_unlock_irq(&page->mapping->tree_lock);
3046                         }
3047
3048                         page->mapping->a_ops->invalidatepage(page, 0);
3049                         unlock_page(page);
3050                 }
3051         }
3052
3053         return ret;
3054 }
3055
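     /*
      * Error-path teardown: unpin every range still marked dirty in the
      * pinned extent tree, discarding each range first when the fs is
      * mounted with -o discard.
      */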
3056 static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3057                                        struct extent_io_tree *pinned_extents)
3058 {
3059         struct extent_io_tree *unpin;
3060         u64 start;
3061         u64 end;
3062         int ret;
3063
3064         unpin = pinned_extents;
3065         while (1) {
3066                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3067                                             EXTENT_DIRTY);
3068                 if (ret)
3069                         break;
3070
3071                 /* discard the range on disk if mounted with -o discard */
3072                 if (btrfs_test_opt(root, DISCARD))
3073                         ret = btrfs_error_discard_extent(root, start,
3074                                                          end + 1 - start,
3075                                                          NULL);
3076
3077                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3078                 btrfs_error_unpin_extent_range(root, start, end);
3079                 cond_resched();
3080         }
3081
3082         return 0;
3083 }
3084
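     /*
      * Last-resort cleanup after an unrecoverable error: block new
      * transaction joins, then for each transaction still on trans_list
      * destroy its ordered data, delayed refs, pending snapshots,
      * delalloc inodes and dirty/pinned extents, wake anything waiting
      * on the commit and free the transaction.
      */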
3085 static int btrfs_cleanup_transaction(struct btrfs_root *root)
3086 {
3087         struct btrfs_transaction *t;
3088         LIST_HEAD(list);
3089
3090         WARN_ON(1);
3091
3092         mutex_lock(&root->fs_info->transaction_kthread_mutex);
3093
3094         spin_lock(&root->fs_info->trans_lock);
3095         list_splice_init(&root->fs_info->trans_list, &list);
3096         root->fs_info->trans_no_join = 1;
3097         spin_unlock(&root->fs_info->trans_lock);
3098
3099         while (!list_empty(&list)) {
3100                 t = list_entry(list.next, struct btrfs_transaction, list);
3101                 if (!t)
3102                         break;
3103
3104                 btrfs_destroy_ordered_operations(root);
3105
3106                 btrfs_destroy_ordered_extents(root);
3107
3108                 btrfs_destroy_delayed_refs(t, root);
3109
3110                 btrfs_block_rsv_release(root,
3111                                         &root->fs_info->trans_block_rsv,
3112                                         t->dirty_pages.dirty_bytes);
3113
3114                 /* FIXME: cleanup wait for commit */
3115                 t->in_commit = 1;
3116                 t->blocked = 1;
3117                 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3118                         wake_up(&root->fs_info->transaction_blocked_wait);
3119
3120                 t->blocked = 0;
3121                 if (waitqueue_active(&root->fs_info->transaction_wait))
3122                         wake_up(&root->fs_info->transaction_wait);
3123
3124                 t->commit_done = 1;
3125                 if (waitqueue_active(&t->commit_wait))
3126                         wake_up(&t->commit_wait);
3127
3128                 btrfs_destroy_pending_snapshots(t);
3129
3130                 btrfs_destroy_delalloc_inodes(root);
3131
3132                 spin_lock(&root->fs_info->trans_lock);
3133                 root->fs_info->running_transaction = NULL;
3134                 spin_unlock(&root->fs_info->trans_lock);
3135
3136                 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3137                                              EXTENT_DIRTY);
3138
3139                 btrfs_destroy_pinned_extent(root,
3140                                             root->fs_info->pinned_extents);
3141
3142                 atomic_set(&t->use_count, 0);
3143                 list_del_init(&t->list);
3144                 memset(t, 0, sizeof(*t));
3145                 kmem_cache_free(btrfs_transaction_cachep, t);
3146         }
3147
3148         spin_lock(&root->fs_info->trans_lock);
3149         root->fs_info->trans_no_join = 0;
3150         spin_unlock(&root->fs_info->trans_lock);
3151         mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3152
3153         return 0;
3154 }
3155
3156 static struct extent_io_ops btree_extent_io_ops = {
3157         .write_cache_pages_lock_hook = btree_lock_page_hook,
3158         .readpage_end_io_hook = btree_readpage_end_io_hook,
3159         .submit_bio_hook = btree_submit_bio_hook,
3160         /* note we're sharing with inode.c for the merge bio hook */
3161         .merge_bio_hook = btrfs_merge_bio_hook,
3162 };