/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
                                   u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          struct btrfs_path *path,
                          u64 bytenr, u64 num_bytes,
                          int is_data, int reserved,
                          struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
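/*
 * Worked example (editor's illustration, not in the original): caching the
 * range [0, 100) while pinned_extents covers [40, 60] adds [0, 40) and
 * [61, 100) as free space and returns 79 as total_added.
 */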
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        exclude_super_stripes(extent_root, block_group);
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->bytes_super += block_group->bytes_super;
        spin_unlock(&block_group->space_info->lock);

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(extent_root, path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for the caching kthread, one for the caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}
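
/*
 * Illustrative sketch (editor's addition, not in the original file): one
 * way a caller could kick off caching and then block until the kthread
 * above finishes.  The helper name is hypothetical and the block is kept
 * under #if 0 so it does not change the compiled code.
 */
#if 0
static void example_wait_block_group_cache_done(
                                struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *caching_ctl;

        cache_block_group(cache);

        caching_ctl = get_caching_control(cache);
        if (!caching_ctl)
                return;         /* caching already finished (or never started) */

        /* the kthread wakes this queue as it makes progress and on exit */
        wait_event(caching_ctl->wait, block_group_cache_done(cache));
        put_caching_control(caching_ctl);
}
#endif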

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}
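
/*
 * e.g. div_factor(1024, 9) == 921 (integer math), i.e. roughly 90% of
 * num.  btrfs_find_block_group() below uses it that way to decide
 * whether a block group still has a comfortable amount of free space.
 */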
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind record the block's owner tree and the pointer's
 * key.  This information allows us to find the block by b-tree search.
 * Full back refs are for pointers in tree blocks not referenced by their
 * owner trees; the location of the tree block is recorded in the back ref.
 * Full back refs are actually generic and can be used in all cases where
 * implicit back refs are used.  Their major shortcoming is overhead:
 * every time a tree block is COWed, we have to update the back ref
 * entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for the
 * pointers in the block.  Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * the pointers in the block.  Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * For tree blocks, both the implicit and the full back refs consist of
 * a key only.  The key offset for the implicit back refs is the objectid
 * of the block's owner tree.  The key offset for the full back refs is
 * the first byte of the parent block.
 *
 * When implicit back refs are used, the lowest key and the level of the
 * tree block are also required.  This information is stored in the
 * tree block info structure.
 */
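
/*
 * Illustrative example (editor's addition, not in the original file):
 * composing the two kinds of back ref keys for a data extent at 'bytenr'.
 * The helper and the concrete objectids (root 5, inode 257, offset 0) are
 * hypothetical; the hash is hash_extent_data_ref(), defined further below.
 */
#if 0
static void example_data_ref_keys(struct btrfs_key *implicit,
                                  struct btrfs_key *full,
                                  u64 bytenr, u64 parent_leaf_start)
{
        /* implicit: identifies (owner root, inode, file offset) by hash */
        implicit->objectid = bytenr;
        implicit->type = BTRFS_EXTENT_DATA_REF_KEY;
        implicit->offset = hash_extent_data_ref(5, 257, 0);

        /* full (shared): keyed on the first byte of the referencing leaf */
        full->objectid = bytenr;
        full->type = BTRFS_SHARED_DATA_REF_KEY;
        full->offset = parent_leaf_start;
}
#endif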

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

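        /*
         * Editor's note: the root_objectid crc lands in the high bits.
         * The shift is 31, not 32, so the two crc32c values overlap by
         * one bit; this hash is stored in on-disk keys, so the layout
         * is fixed either way.
         */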
        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

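/*
 * Summary of extent_ref_type() above (editor's restatement):
 *
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block), parent set -> SHARED_BLOCK_REF
 *   owner < BTRFS_FIRST_FREE_OBJECTID (tree block), no parent  -> TREE_BLOCK_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (data),      parent set -> SHARED_DATA_REF
 *   owner >= BTRFS_FIRST_FREE_OBJECTID (data),      no parent  -> EXTENT_DATA_REF
 */
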
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is set
 * to the address of the inline back ref and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

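/*
 * Illustrative sketch (editor's addition, not in the original file): how a
 * caller might dispatch on the three return values documented above.  The
 * helper is hypothetical; insert_extent_data_ref() and
 * setup_inline_extent_backref() show the real handling.
 */
#if 0
static int example_add_one_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_path *path, u64 bytenr,
                               u64 num_bytes, u64 parent,
                               u64 root_objectid, u64 owner, u64 offset)
{
        struct btrfs_extent_inline_ref *iref;
        int ret;

        ret = lookup_inline_extent_backref(trans, root, path, &iref,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 1);
        if (ret == 0) {
                /* found: bump the count stored in the existing inline ref */
        } else if (ret == -ENOENT) {
                /* not found: insert a new inline ref at *iref */
        } else if (ret == -EAGAIN) {
                /* extent item is full: fall back to a separate ref item */
        }
        return ret;
}
#endif
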
/*
 * helper to add a new inline back ref
 */
1340 static noinline_for_stack
1341 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1342                                 struct btrfs_root *root,
1343                                 struct btrfs_path *path,
1344                                 struct btrfs_extent_inline_ref *iref,
1345                                 u64 parent, u64 root_objectid,
1346                                 u64 owner, u64 offset, int refs_to_add,
1347                                 struct btrfs_delayed_extent_op *extent_op)
1348 {
1349         struct extent_buffer *leaf;
1350         struct btrfs_extent_item *ei;
1351         unsigned long ptr;
1352         unsigned long end;
1353         unsigned long item_offset;
1354         u64 refs;
1355         int size;
1356         int type;
1357         int ret;
1358
1359         leaf = path->nodes[0];
1360         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1361         item_offset = (unsigned long)iref - (unsigned long)ei;
1362
1363         type = extent_ref_type(parent, owner);
1364         size = btrfs_extent_inline_ref_size(type);
1365
1366         ret = btrfs_extend_item(trans, root, path, size);
1367         BUG_ON(ret);
1368
1369         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1370         refs = btrfs_extent_refs(leaf, ei);
1371         refs += refs_to_add;
1372         btrfs_set_extent_refs(leaf, ei, refs);
1373         if (extent_op)
1374                 __run_delayed_extent_op(extent_op, leaf, ei);
1375
1376         ptr = (unsigned long)ei + item_offset;
1377         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1378         if (ptr < end - size)
1379                 memmove_extent_buffer(leaf, ptr + size, ptr,
1380                                       end - size - ptr);
1381
1382         iref = (struct btrfs_extent_inline_ref *)ptr;
1383         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1384         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1385                 struct btrfs_extent_data_ref *dref;
1386                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1387                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1388                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1389                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1390                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1391         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1392                 struct btrfs_shared_data_ref *sref;
1393                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1394                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1395                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1396         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1397                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1398         } else {
1399                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1400         }
1401         btrfs_mark_buffer_dirty(leaf);
1402         return 0;
1403 }
1404
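/*
 * Editor's sketch (illustrative assumption, not from the source): after
 * setup_inline_extent_backref() extends the item and shifts the tail
 * with memmove_extent_buffer(), a data extent item looks roughly like:
 *
 *	[ btrfs_extent_item | ref A | NEW ref | ref B ]
 *	                             ^ iref is written here so the inline
 *	                               refs stay sorted by type and offset
 */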
1405 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1406                                  struct btrfs_root *root,
1407                                  struct btrfs_path *path,
1408                                  struct btrfs_extent_inline_ref **ref_ret,
1409                                  u64 bytenr, u64 num_bytes, u64 parent,
1410                                  u64 root_objectid, u64 owner, u64 offset)
1411 {
1412         int ret;
1413
1414         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1415                                            bytenr, num_bytes, parent,
1416                                            root_objectid, owner, offset, 0);
1417         if (ret != -ENOENT)
1418                 return ret;
1419
1420         btrfs_release_path(root, path);
1421         *ref_ret = NULL;
1422
1423         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1424                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1425                                             root_objectid);
1426         } else {
1427                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1428                                              root_objectid, owner, offset);
1429         }
1430         return ret;
1431 }
1432
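/*
 * Editor's note (hedged): lookup_extent_backref() tries the inline form
 * first and only then falls back to keyed ref items.  Example of the
 * owner split, assuming owners below BTRFS_FIRST_FREE_OBJECTID (256)
 * are tree block levels and larger owners are inode numbers:
 *
 *	owner == 2	tree block at level 2	 -> lookup_tree_block_ref()
 *	owner == 257	data extent of inode 257 -> lookup_extent_data_ref()
 */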
1433 /*
1434  * helper to update/remove an inline back ref
1435  */
1436 static noinline_for_stack
1437 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1438                                  struct btrfs_root *root,
1439                                  struct btrfs_path *path,
1440                                  struct btrfs_extent_inline_ref *iref,
1441                                  int refs_to_mod,
1442                                  struct btrfs_delayed_extent_op *extent_op)
1443 {
1444         struct extent_buffer *leaf;
1445         struct btrfs_extent_item *ei;
1446         struct btrfs_extent_data_ref *dref = NULL;
1447         struct btrfs_shared_data_ref *sref = NULL;
1448         unsigned long ptr;
1449         unsigned long end;
1450         u32 item_size;
1451         int size;
1452         int type;
1453         int ret;
1454         u64 refs;
1455
1456         leaf = path->nodes[0];
1457         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1458         refs = btrfs_extent_refs(leaf, ei);
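        /*
         * editor's note: refs is u64, so refs + refs_to_mod can never be
         * negative here; this WARN_ON only fires when the sum is exactly
         * zero, underflow is caught by the BUG_ON further down
         */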
1459         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1460         refs += refs_to_mod;
1461         btrfs_set_extent_refs(leaf, ei, refs);
1462         if (extent_op)
1463                 __run_delayed_extent_op(extent_op, leaf, ei);
1464
1465         type = btrfs_extent_inline_ref_type(leaf, iref);
1466
1467         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1468                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1469                 refs = btrfs_extent_data_ref_count(leaf, dref);
1470         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1471                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1472                 refs = btrfs_shared_data_ref_count(leaf, sref);
1473         } else {
1474                 refs = 1;
1475                 BUG_ON(refs_to_mod != -1);
1476         }
1477
1478         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1479         refs += refs_to_mod;
1480
1481         if (refs > 0) {
1482                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1483                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1484                 else
1485                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1486         } else {
1487                 size = btrfs_extent_inline_ref_size(type);
1488                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1489                 ptr = (unsigned long)iref;
1490                 end = (unsigned long)ei + item_size;
1491                 if (ptr + size < end)
1492                         memmove_extent_buffer(leaf, ptr, ptr + size,
1493                                               end - ptr - size);
1494                 item_size -= size;
1495                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1496                 BUG_ON(ret);
1497         }
1498         btrfs_mark_buffer_dirty(leaf);
1499         return 0;
1500 }
1501
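/*
 * Editor's sketch of the shrink path above (illustrative only): when a
 * per-ref count reaches zero, the inline ref is cut out of the item by
 * sliding the tail down and truncating, e.g. removing ref A:
 *
 *	before:	[ extent_item | ref A | ref B ]	item_size = S
 *	after:	[ extent_item | ref B ]		item_size = S - size(A)
 */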
1502 static noinline_for_stack
1503 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1504                                  struct btrfs_root *root,
1505                                  struct btrfs_path *path,
1506                                  u64 bytenr, u64 num_bytes, u64 parent,
1507                                  u64 root_objectid, u64 owner,
1508                                  u64 offset, int refs_to_add,
1509                                  struct btrfs_delayed_extent_op *extent_op)
1510 {
1511         struct btrfs_extent_inline_ref *iref;
1512         int ret;
1513
1514         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1515                                            bytenr, num_bytes, parent,
1516                                            root_objectid, owner, offset, 1);
1517         if (ret == 0) {
1518                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1519                 ret = update_inline_extent_backref(trans, root, path, iref,
1520                                                    refs_to_add, extent_op);
1521         } else if (ret == -ENOENT) {
1522                 ret = setup_inline_extent_backref(trans, root, path, iref,
1523                                                   parent, root_objectid,
1524                                                   owner, offset, refs_to_add,
1525                                                   extent_op);
1526         }
1527         return ret;
1528 }
1529
1530 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1531                                  struct btrfs_root *root,
1532                                  struct btrfs_path *path,
1533                                  u64 bytenr, u64 parent, u64 root_objectid,
1534                                  u64 owner, u64 offset, int refs_to_add)
1535 {
1536         int ret;
1537         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1538                 BUG_ON(refs_to_add != 1);
1539                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1540                                             parent, root_objectid);
1541         } else {
1542                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1543                                              parent, root_objectid,
1544                                              owner, offset, refs_to_add);
1545         }
1546         return ret;
1547 }
1548
1549 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1550                                  struct btrfs_root *root,
1551                                  struct btrfs_path *path,
1552                                  struct btrfs_extent_inline_ref *iref,
1553                                  int refs_to_drop, int is_data)
1554 {
1555         int ret;
1556
1557         BUG_ON(!is_data && refs_to_drop != 1);
1558         if (iref) {
1559                 ret = update_inline_extent_backref(trans, root, path, iref,
1560                                                    -refs_to_drop, NULL);
1561         } else if (is_data) {
1562                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1563         } else {
1564                 ret = btrfs_del_item(trans, root, path);
1565         }
1566         return ret;
1567 }
1568
1569 #ifdef BIO_RW_DISCARD
1570 static void btrfs_issue_discard(struct block_device *bdev,
1571                                 u64 start, u64 len)
1572 {
1573         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1574                              DISCARD_FL_BARRIER);
1575 }
1576 #endif
1577
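/*
 * Editor's worked example for the ">> 9" above (invented numbers):
 * blkdev_issue_discard() takes 512-byte sectors, so a start of
 * 1 MiB (1048576 bytes) becomes 1048576 >> 9 = 2048 sectors.
 */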
1578 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1579                                 u64 num_bytes)
1580 {
1581 #ifdef BIO_RW_DISCARD
1582         int ret;
1583         u64 map_length = num_bytes;
1584         struct btrfs_multi_bio *multi = NULL;
1585
1586         /* Tell the block device(s) that the sectors can be discarded */
1587         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1588                               bytenr, &map_length, &multi, 0);
1589         if (!ret) {
1590                 struct btrfs_bio_stripe *stripe = multi->stripes;
1591                 int i;
1592
1593                 if (map_length > num_bytes)
1594                         map_length = num_bytes;
1595
1596                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1597                         btrfs_issue_discard(stripe->dev->bdev,
1598                                             stripe->physical,
1599                                             map_length);
1600                 }
1601                 kfree(multi);
1602         }
1603
1604         return ret;
1605 #else
1606         return 0;
1607 #endif
1608 }
1609
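/*
 * Editor's note (hedged): a logical range may map onto several physical
 * stripes; the loop above clamps map_length to num_bytes and issues one
 * discard per stripe returned by btrfs_map_block().
 */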
1610 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1611                          struct btrfs_root *root,
1612                          u64 bytenr, u64 num_bytes, u64 parent,
1613                          u64 root_objectid, u64 owner, u64 offset)
1614 {
1615         int ret;
1616         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1617                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1618
1619         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1620                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1621                                         parent, root_objectid, (int)owner,
1622                                         BTRFS_ADD_DELAYED_REF, NULL);
1623         } else {
1624                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1625                                         parent, root_objectid, owner, offset,
1626                                         BTRFS_ADD_DELAYED_REF, NULL);
1627         }
1628         return ret;
1629 }
1630
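/*
 * Hedged usage sketch (editor's example; ino and file_offset are
 * invented names): to add one reference for a data extent owned by this
 * subvolume, pass parent == 0 so the ref is keyed by root/inode/offset
 * rather than by the owning tree block:
 *
 *	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *				   root->root_key.objectid, ino,
 *				   file_offset);
 */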
1631 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1632                                   struct btrfs_root *root,
1633                                   u64 bytenr, u64 num_bytes,
1634                                   u64 parent, u64 root_objectid,
1635                                   u64 owner, u64 offset, int refs_to_add,
1636                                   struct btrfs_delayed_extent_op *extent_op)
1637 {
1638         struct btrfs_path *path;
1639         struct extent_buffer *leaf;
1640         struct btrfs_extent_item *item;
1641         u64 refs;
1642         int ret;
1643         int err = 0;
1644
1645         path = btrfs_alloc_path();
1646         if (!path)
1647                 return -ENOMEM;
1648
1649         path->reada = 1;
1650         path->leave_spinning = 1;
1651         /* this will set up the path even if it fails to insert the back ref */
1652         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1653                                            path, bytenr, num_bytes, parent,
1654                                            root_objectid, owner, offset,
1655                                            refs_to_add, extent_op);
1656         if (ret == 0)
1657                 goto out;
1658
1659         if (ret != -EAGAIN) {
1660                 err = ret;
1661                 goto out;
1662         }
1663
1664         leaf = path->nodes[0];
1665         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1666         refs = btrfs_extent_refs(leaf, item);
1667         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1668         if (extent_op)
1669                 __run_delayed_extent_op(extent_op, leaf, item);
1670
1671         btrfs_mark_buffer_dirty(leaf);
1672         btrfs_release_path(root->fs_info->extent_root, path);
1673
1674         path->reada = 1;
1675         path->leave_spinning = 1;
1676
1677         /* now insert the actual backref */
1678         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1679                                     path, bytenr, parent, root_objectid,
1680                                     owner, offset, refs_to_add);
1681         BUG_ON(ret);
1682 out:
1683         btrfs_free_path(path);
1684         return err;
1685 }
1686
1687 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1688                                 struct btrfs_root *root,
1689                                 struct btrfs_delayed_ref_node *node,
1690                                 struct btrfs_delayed_extent_op *extent_op,
1691                                 int insert_reserved)
1692 {
1693         int ret = 0;
1694         struct btrfs_delayed_data_ref *ref;
1695         struct btrfs_key ins;
1696         u64 parent = 0;
1697         u64 ref_root = 0;
1698         u64 flags = 0;
1699
1700         ins.objectid = node->bytenr;
1701         ins.offset = node->num_bytes;
1702         ins.type = BTRFS_EXTENT_ITEM_KEY;
1703
1704         ref = btrfs_delayed_node_to_data_ref(node);
1705         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1706                 parent = ref->parent;
1707         else
1708                 ref_root = ref->root;
1709
1710         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1711                 if (extent_op) {
1712                         BUG_ON(extent_op->update_key);
1713                         flags |= extent_op->flags_to_set;
1714                 }
1715                 ret = alloc_reserved_file_extent(trans, root,
1716                                                  parent, ref_root, flags,
1717                                                  ref->objectid, ref->offset,
1718                                                  &ins, node->ref_mod);
1719         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1720                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1721                                              node->num_bytes, parent,
1722                                              ref_root, ref->objectid,
1723                                              ref->offset, node->ref_mod,
1724                                              extent_op);
1725         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1726                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1727                                           node->num_bytes, parent,
1728                                           ref_root, ref->objectid,
1729                                           ref->offset, node->ref_mod,
1730                                           extent_op);
1731         } else {
1732                 BUG();
1733         }
1734         return ret;
1735 }
1736
1737 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1738                                     struct extent_buffer *leaf,
1739                                     struct btrfs_extent_item *ei)
1740 {
1741         u64 flags = btrfs_extent_flags(leaf, ei);
1742         if (extent_op->update_flags) {
1743                 flags |= extent_op->flags_to_set;
1744                 btrfs_set_extent_flags(leaf, ei, flags);
1745         }
1746
1747         if (extent_op->update_key) {
1748                 struct btrfs_tree_block_info *bi;
1749                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1750                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1751                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1752         }
1753 }
1754
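/*
 * Editor's sketch of a delayed extent op (illustrative; mirrors
 * btrfs_set_disk_extent_flags() below): to OR a flag into an extent
 * item without touching its key:
 *
 *	op->flags_to_set = BTRFS_EXTENT_FLAG_DATA;
 *	op->update_flags = 1;
 *	op->update_key = 0;
 *
 * update_key additionally rewrites the key stored in the item's
 * btrfs_tree_block_info, and so is only valid for tree blocks.
 */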
1755 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1756                                  struct btrfs_root *root,
1757                                  struct btrfs_delayed_ref_node *node,
1758                                  struct btrfs_delayed_extent_op *extent_op)
1759 {
1760         struct btrfs_key key;
1761         struct btrfs_path *path;
1762         struct btrfs_extent_item *ei;
1763         struct extent_buffer *leaf;
1764         u32 item_size;
1765         int ret;
1766         int err = 0;
1767
1768         path = btrfs_alloc_path();
1769         if (!path)
1770                 return -ENOMEM;
1771
1772         key.objectid = node->bytenr;
1773         key.type = BTRFS_EXTENT_ITEM_KEY;
1774         key.offset = node->num_bytes;
1775
1776         path->reada = 1;
1777         path->leave_spinning = 1;
1778         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1779                                 path, 0, 1);
1780         if (ret < 0) {
1781                 err = ret;
1782                 goto out;
1783         }
1784         if (ret > 0) {
1785                 err = -EIO;
1786                 goto out;
1787         }
1788
1789         leaf = path->nodes[0];
1790         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1791 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1792         if (item_size < sizeof(*ei)) {
1793                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1794                                              path, (u64)-1, 0);
1795                 if (ret < 0) {
1796                         err = ret;
1797                         goto out;
1798                 }
1799                 leaf = path->nodes[0];
1800                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1801         }
1802 #endif
1803         BUG_ON(item_size < sizeof(*ei));
1804         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1805         __run_delayed_extent_op(extent_op, leaf, ei);
1806
1807         btrfs_mark_buffer_dirty(leaf);
1808 out:
1809         btrfs_free_path(path);
1810         return err;
1811 }
1812
1813 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1814                                 struct btrfs_root *root,
1815                                 struct btrfs_delayed_ref_node *node,
1816                                 struct btrfs_delayed_extent_op *extent_op,
1817                                 int insert_reserved)
1818 {
1819         int ret = 0;
1820         struct btrfs_delayed_tree_ref *ref;
1821         struct btrfs_key ins;
1822         u64 parent = 0;
1823         u64 ref_root = 0;
1824
1825         ins.objectid = node->bytenr;
1826         ins.offset = node->num_bytes;
1827         ins.type = BTRFS_EXTENT_ITEM_KEY;
1828
1829         ref = btrfs_delayed_node_to_tree_ref(node);
1830         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1831                 parent = ref->parent;
1832         else
1833                 ref_root = ref->root;
1834
1835         BUG_ON(node->ref_mod != 1);
1836         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1837                 BUG_ON(!extent_op || !extent_op->update_flags ||
1838                        !extent_op->update_key);
1839                 ret = alloc_reserved_tree_block(trans, root,
1840                                                 parent, ref_root,
1841                                                 extent_op->flags_to_set,
1842                                                 &extent_op->key,
1843                                                 ref->level, &ins);
1844         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1845                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1846                                              node->num_bytes, parent, ref_root,
1847                                              ref->level, 0, 1, extent_op);
1848         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1849                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1850                                           node->num_bytes, parent, ref_root,
1851                                           ref->level, 0, 1, extent_op);
1852         } else {
1853                 BUG();
1854         }
1855         return ret;
1856 }
1857
1859 /* helper function to actually process a single delayed ref entry */
1860 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1861                                struct btrfs_root *root,
1862                                struct btrfs_delayed_ref_node *node,
1863                                struct btrfs_delayed_extent_op *extent_op,
1864                                int insert_reserved)
1865 {
1866         int ret;
1867         if (btrfs_delayed_ref_is_head(node)) {
1868                 struct btrfs_delayed_ref_head *head;
1869                 /*
1870                  * we've hit the end of the chain and we were supposed
1871                  * to insert this extent into the tree.  But it got
1872                  * deleted before we ever needed to insert it, so all
1873                  * we have to do is clean up the accounting.
1874                  */
1875                 BUG_ON(extent_op);
1876                 head = btrfs_delayed_node_to_head(node);
1877                 if (insert_reserved) {
1878                         int mark_free = 0;
1879                         struct extent_buffer *must_clean = NULL;
1880
1881                         ret = pin_down_bytes(trans, root, NULL,
1882                                              node->bytenr, node->num_bytes,
1883                                              head->is_data, 1, &must_clean);
1884                         if (ret > 0)
1885                                 mark_free = 1;
1886
1887                         if (must_clean) {
1888                                 clean_tree_block(NULL, root, must_clean);
1889                                 btrfs_tree_unlock(must_clean);
1890                                 free_extent_buffer(must_clean);
1891                         }
1892                         if (head->is_data) {
1893                                 ret = btrfs_del_csums(trans, root,
1894                                                       node->bytenr,
1895                                                       node->num_bytes);
1896                                 BUG_ON(ret);
1897                         }
1898                         if (mark_free) {
1899                                 ret = btrfs_free_reserved_extent(root,
1900                                                         node->bytenr,
1901                                                         node->num_bytes);
1902                                 BUG_ON(ret);
1903                         }
1904                 }
1905                 mutex_unlock(&head->mutex);
1906                 return 0;
1907         }
1908
1909         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1910             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1911                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1912                                            insert_reserved);
1913         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1914                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1915                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1916                                            insert_reserved);
1917         else
1918                 BUG();
1919         return ret;
1920 }
1921
1922 static noinline struct btrfs_delayed_ref_node *
1923 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1924 {
1925         struct rb_node *node;
1926         struct btrfs_delayed_ref_node *ref;
1927         int action = BTRFS_ADD_DELAYED_REF;
1928 again:
1929         /*
1930          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
1931          * this prevents the ref count from going down to zero while
1932          * there are still pending delayed refs.
1933          */
1934         node = rb_prev(&head->node.rb_node);
1935         while (1) {
1936                 if (!node)
1937                         break;
1938                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1939                                 rb_node);
1940                 if (ref->bytenr != head->node.bytenr)
1941                         break;
1942                 if (ref->action == action)
1943                         return ref;
1944                 node = rb_prev(node);
1945         }
1946         if (action == BTRFS_ADD_DELAYED_REF) {
1947                 action = BTRFS_DROP_DELAYED_REF;
1948                 goto again;
1949         }
1950         return NULL;
1951 }
1952
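/*
 * Editor's worked example for select_delayed_ref() (invented numbers):
 * with pending refs [+1, -1, +1] on one head, running an ADD first
 * keeps the count at or above one the whole time; running the DROP
 * first could briefly read as zero and free a still-referenced extent.
 */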
1953 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1954                                        struct btrfs_root *root,
1955                                        struct list_head *cluster)
1956 {
1957         struct btrfs_delayed_ref_root *delayed_refs;
1958         struct btrfs_delayed_ref_node *ref;
1959         struct btrfs_delayed_ref_head *locked_ref = NULL;
1960         struct btrfs_delayed_extent_op *extent_op;
1961         int ret;
1962         int count = 0;
1963         int must_insert_reserved = 0;
1964
1965         delayed_refs = &trans->transaction->delayed_refs;
1966         while (1) {
1967                 if (!locked_ref) {
1968                         /* pick a new head ref from the cluster list */
1969                         if (list_empty(cluster))
1970                                 break;
1971
1972                         locked_ref = list_entry(cluster->next,
1973                                      struct btrfs_delayed_ref_head, cluster);
1974
1975                         /* grab the lock that says we are going to process
1976                          * all the refs for this head */
1977                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1978
1979                         /*
1980                          * we may have dropped the spin lock to get the head
1981                          * mutex lock, and that might have given someone else
1982                          * time to free the head.  If that's true, it has been
1983                          * removed from our list and we can move on.
1984                          */
1985                         if (ret == -EAGAIN) {
1986                                 locked_ref = NULL;
1987                                 count++;
1988                                 continue;
1989                         }
1990                 }
1991
1992                 /*
1993                  * record the must_insert_reserved flag before we
1994                  * drop the spin lock.
1995                  */
1996                 must_insert_reserved = locked_ref->must_insert_reserved;
1997                 locked_ref->must_insert_reserved = 0;
1998
1999                 extent_op = locked_ref->extent_op;
2000                 locked_ref->extent_op = NULL;
2001
2002                 /*
2003                  * locked_ref is the head node, so we have to go one
2004                  * node back for any delayed ref updates
2005                  */
2006                 ref = select_delayed_ref(locked_ref);
2007                 if (!ref) {
2008                         /* All delayed refs have been processed, Go ahead
2009                         /* all delayed refs have been processed; go ahead
2010                          * and send the head node to run_one_delayed_ref
2011                          * so that any accounting fixes can happen
2012                         ref = &locked_ref->node;
2013
2014                         if (extent_op && must_insert_reserved) {
2015                                 kfree(extent_op);
2016                                 extent_op = NULL;
2017                         }
2018
2019                         if (extent_op) {
2020                                 spin_unlock(&delayed_refs->lock);
2021
2022                                 ret = run_delayed_extent_op(trans, root,
2023                                                             ref, extent_op);
2024                                 BUG_ON(ret);
2025                                 kfree(extent_op);
2026
2027                                 cond_resched();
2028                                 spin_lock(&delayed_refs->lock);
2029                                 continue;
2030                         }
2031
2032                         list_del_init(&locked_ref->cluster);
2033                         locked_ref = NULL;
2034                 }
2035
2036                 ref->in_tree = 0;
2037                 rb_erase(&ref->rb_node, &delayed_refs->root);
2038                 delayed_refs->num_entries--;
2039
2040                 spin_unlock(&delayed_refs->lock);
2041
2042                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2043                                           must_insert_reserved);
2044                 BUG_ON(ret);
2045
2046                 btrfs_put_delayed_ref(ref);
2047                 kfree(extent_op);
2048                 count++;
2049
2050                 cond_resched();
2051                 spin_lock(&delayed_refs->lock);
2052         }
2053         return count;
2054 }
2055
2056 /*
2057  * this starts processing the delayed reference count updates and
2058  * extent insertions we have queued up so far.  count can be
2059  * 0, which means to process everything in the tree at the start
2060  * of the run (but not newly added entries), or it can be some target
2061  * number you'd like to process.
2062  */
2063 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2064                            struct btrfs_root *root, unsigned long count)
2065 {
2066         struct rb_node *node;
2067         struct btrfs_delayed_ref_root *delayed_refs;
2068         struct btrfs_delayed_ref_node *ref;
2069         struct list_head cluster;
2070         int ret;
2071         int run_all = count == (unsigned long)-1;
2072         int run_most = 0;
2073
2074         if (root == root->fs_info->extent_root)
2075                 root = root->fs_info->tree_root;
2076
2077         delayed_refs = &trans->transaction->delayed_refs;
2078         INIT_LIST_HEAD(&cluster);
2079 again:
2080         spin_lock(&delayed_refs->lock);
2081         if (count == 0) {
2082                 count = delayed_refs->num_entries * 2;
2083                 run_most = 1;
2084         }
2085         while (1) {
2086                 if (!(run_all || run_most) &&
2087                     delayed_refs->num_heads_ready < 64)
2088                         break;
2089
2090                 /*
2091                  * go find something we can process in the rbtree.  We start at
2092                  * the beginning of the tree, and then build a cluster
2093                  * of refs to process starting at the first one we are able to
2094                  * lock.
2095                  */
2096                 ret = btrfs_find_ref_cluster(trans, &cluster,
2097                                              delayed_refs->run_delayed_start);
2098                 if (ret)
2099                         break;
2100
2101                 ret = run_clustered_refs(trans, root, &cluster);
2102                 BUG_ON(ret < 0);
2103
2104                 count -= min_t(unsigned long, ret, count);
2105
2106                 if (count == 0)
2107                         break;
2108         }
2109
2110         if (run_all) {
2111                 node = rb_first(&delayed_refs->root);
2112                 if (!node)
2113                         goto out;
2114                 count = (unsigned long)-1;
2115
2116                 while (node) {
2117                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2118                                        rb_node);
2119                         if (btrfs_delayed_ref_is_head(ref)) {
2120                                 struct btrfs_delayed_ref_head *head;
2121
2122                                 head = btrfs_delayed_node_to_head(ref);
2123                                 atomic_inc(&ref->refs);
2124
2125                                 spin_unlock(&delayed_refs->lock);
2126                                 mutex_lock(&head->mutex);
2127                                 mutex_unlock(&head->mutex);
2128
2129                                 btrfs_put_delayed_ref(ref);
2130                                 cond_resched();
2131                                 goto again;
2132                         }
2133                         node = rb_next(node);
2134                 }
2135                 spin_unlock(&delayed_refs->lock);
2136                 schedule_timeout(1);
2137                 goto again;
2138         }
2139 out:
2140         spin_unlock(&delayed_refs->lock);
2141         return 0;
2142 }
2143
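/*
 * Hedged usage note (editor's example): callers that must drain the
 * queue, like btrfs_write_dirty_block_groups() below, pass
 * (unsigned long)-1; a caller that only wants to throttle could pass a
 * small target instead:
 *
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 *	btrfs_run_delayed_refs(trans, root, 64);
 */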
2144 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2145                                 struct btrfs_root *root,
2146                                 u64 bytenr, u64 num_bytes, u64 flags,
2147                                 int is_data)
2148 {
2149         struct btrfs_delayed_extent_op *extent_op;
2150         int ret;
2151
2152         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2153         if (!extent_op)
2154                 return -ENOMEM;
2155
2156         extent_op->flags_to_set = flags;
2157         extent_op->update_flags = 1;
2158         extent_op->update_key = 0;
2159         extent_op->is_data = is_data ? 1 : 0;
2160
2161         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2162         if (ret)
2163                 kfree(extent_op);
2164         return ret;
2165 }
2166
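/*
 * Editor's example (hedged): flag updates such as marking a tree block
 * FULL_BACKREF go through this helper so they are ordered with the
 * other delayed refs:
 *
 *	btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *				    BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 */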
2167 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2168                                       struct btrfs_root *root,
2169                                       struct btrfs_path *path,
2170                                       u64 objectid, u64 offset, u64 bytenr)
2171 {
2172         struct btrfs_delayed_ref_head *head;
2173         struct btrfs_delayed_ref_node *ref;
2174         struct btrfs_delayed_data_ref *data_ref;
2175         struct btrfs_delayed_ref_root *delayed_refs;
2176         struct rb_node *node;
2177         int ret = 0;
2178
2179         ret = -ENOENT;
2180         delayed_refs = &trans->transaction->delayed_refs;
2181         spin_lock(&delayed_refs->lock);
2182         head = btrfs_find_delayed_ref_head(trans, bytenr);
2183         if (!head)
2184                 goto out;
2185
2186         if (!mutex_trylock(&head->mutex)) {
2187                 atomic_inc(&head->node.refs);
2188                 spin_unlock(&delayed_refs->lock);
2189
2190                 btrfs_release_path(root->fs_info->extent_root, path);
2191
2192                 mutex_lock(&head->mutex);
2193                 mutex_unlock(&head->mutex);
2194                 btrfs_put_delayed_ref(&head->node);
2195                 return -EAGAIN;
2196         }
2197
2198         node = rb_prev(&head->node.rb_node);
2199         if (!node)
2200                 goto out_unlock;
2201
2202         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2203
2204         if (ref->bytenr != bytenr)
2205                 goto out_unlock;
2206
2207         ret = 1;
2208         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2209                 goto out_unlock;
2210
2211         data_ref = btrfs_delayed_node_to_data_ref(ref);
2212
2213         node = rb_prev(node);
2214         if (node) {
2215                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2216                 if (ref->bytenr == bytenr)
2217                         goto out_unlock;
2218         }
2219
2220         if (data_ref->root != root->root_key.objectid ||
2221             data_ref->objectid != objectid || data_ref->offset != offset)
2222                 goto out_unlock;
2223
2224         ret = 0;
2225 out_unlock:
2226         mutex_unlock(&head->mutex);
2227 out:
2228         spin_unlock(&delayed_refs->lock);
2229         return ret;
2230 }
2231
2232 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2233                                         struct btrfs_root *root,
2234                                         struct btrfs_path *path,
2235                                         u64 objectid, u64 offset, u64 bytenr)
2236 {
2237         struct btrfs_root *extent_root = root->fs_info->extent_root;
2238         struct extent_buffer *leaf;
2239         struct btrfs_extent_data_ref *ref;
2240         struct btrfs_extent_inline_ref *iref;
2241         struct btrfs_extent_item *ei;
2242         struct btrfs_key key;
2243         u32 item_size;
2244         int ret;
2245
2246         key.objectid = bytenr;
2247         key.offset = (u64)-1;
2248         key.type = BTRFS_EXTENT_ITEM_KEY;
2249
2250         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2251         if (ret < 0)
2252                 goto out;
2253         BUG_ON(ret == 0);
2254
2255         ret = -ENOENT;
2256         if (path->slots[0] == 0)
2257                 goto out;
2258
2259         path->slots[0]--;
2260         leaf = path->nodes[0];
2261         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2262
2263         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2264                 goto out;
2265
2266         ret = 1;
2267         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2268 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2269         if (item_size < sizeof(*ei)) {
2270                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2271                 goto out;
2272         }
2273 #endif
2274         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2275
2276         if (item_size != sizeof(*ei) +
2277             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2278                 goto out;
2279
2280         if (btrfs_extent_generation(leaf, ei) <=
2281             btrfs_root_last_snapshot(&root->root_item))
2282                 goto out;
2283
2284         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2285         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2286             BTRFS_EXTENT_DATA_REF_KEY)
2287                 goto out;
2288
2289         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2290         if (btrfs_extent_refs(leaf, ei) !=
2291             btrfs_extent_data_ref_count(leaf, ref) ||
2292             btrfs_extent_data_ref_root(leaf, ref) !=
2293             root->root_key.objectid ||
2294             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2295             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2296                 goto out;
2297
2298         ret = 0;
2299 out:
2300         return ret;
2301 }
2302
2303 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2304                           struct btrfs_root *root,
2305                           u64 objectid, u64 offset, u64 bytenr)
2306 {
2307         struct btrfs_path *path;
2308         int ret;
2309         int ret2;
2310
2311         path = btrfs_alloc_path();
2312         if (!path)
2313                 return -ENOMEM;
2314
2315         do {
2316                 ret = check_committed_ref(trans, root, path, objectid,
2317                                           offset, bytenr);
2318                 if (ret && ret != -ENOENT)
2319                         goto out;
2320
2321                 ret2 = check_delayed_ref(trans, root, path, objectid,
2322                                          offset, bytenr);
2323         } while (ret2 == -EAGAIN);
2324
2325         if (ret2 && ret2 != -ENOENT) {
2326                 ret = ret2;
2327                 goto out;
2328         }
2329
2330         if (ret != -ENOENT || ret2 != -ENOENT)
2331                 ret = 0;
2332 out:
2333         btrfs_free_path(path);
2334         return ret;
2335 }
2336
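/*
 * Editor's note on the return convention above (derived from the code,
 * stated with hedging): 0 means the extent is provably referenced only
 * by (root, objectid, offset), so a caller such as nodatacow may
 * overwrite it in place; any non-zero result means a cross reference
 * may exist and the extent must be treated as shared.
 */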
2337 #if 0
2338 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2339                     struct extent_buffer *buf, u32 nr_extents)
2340 {
2341         struct btrfs_key key;
2342         struct btrfs_file_extent_item *fi;
2343         u64 root_gen;
2344         u32 nritems;
2345         int i;
2346         int level;
2347         int ret = 0;
2348         int shared = 0;
2349
2350         if (!root->ref_cows)
2351                 return 0;
2352
2353         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2354                 shared = 0;
2355                 root_gen = root->root_key.offset;
2356         } else {
2357                 shared = 1;
2358                 root_gen = trans->transid - 1;
2359         }
2360
2361         level = btrfs_header_level(buf);
2362         nritems = btrfs_header_nritems(buf);
2363
2364         if (level == 0) {
2365                 struct btrfs_leaf_ref *ref;
2366                 struct btrfs_extent_info *info;
2367
2368                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2369                 if (!ref) {
2370                         ret = -ENOMEM;
2371                         goto out;
2372                 }
2373
2374                 ref->root_gen = root_gen;
2375                 ref->bytenr = buf->start;
2376                 ref->owner = btrfs_header_owner(buf);
2377                 ref->generation = btrfs_header_generation(buf);
2378                 ref->nritems = nr_extents;
2379                 info = ref->extents;
2380
2381                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2382                         u64 disk_bytenr;
2383                         btrfs_item_key_to_cpu(buf, &key, i);
2384                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2385                                 continue;
2386                         fi = btrfs_item_ptr(buf, i,
2387                                             struct btrfs_file_extent_item);
2388                         if (btrfs_file_extent_type(buf, fi) ==
2389                             BTRFS_FILE_EXTENT_INLINE)
2390                                 continue;
2391                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2392                         if (disk_bytenr == 0)
2393                                 continue;
2394
2395                         info->bytenr = disk_bytenr;
2396                         info->num_bytes =
2397                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2398                         info->objectid = key.objectid;
2399                         info->offset = key.offset;
2400                         info++;
2401                 }
2402
2403                 ret = btrfs_add_leaf_ref(root, ref, shared);
2404                 if (ret == -EEXIST && shared) {
2405                         struct btrfs_leaf_ref *old;
2406                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2407                         BUG_ON(!old);
2408                         btrfs_remove_leaf_ref(root, old);
2409                         btrfs_free_leaf_ref(root, old);
2410                         ret = btrfs_add_leaf_ref(root, ref, shared);
2411                 }
2412                 WARN_ON(ret);
2413                 btrfs_free_leaf_ref(root, ref);
2414         }
2415 out:
2416         return ret;
2417 }
2418
2419 /* when a block goes through cow, we update the reference counts of
2420  * everything that block points to.  The internal pointers of the block
2421  * can be in just about any order, and it is likely to have clusters of
2422  * things that are close together and clusters of things that are not.
2423  *
2424  * To help reduce the seeks that come with updating all of these reference
2425  * counts, sort them by byte number before actual updates are done.
2426  *
2427  * struct refsort is used to match byte number to slot in the btree block.
2428  * we sort based on the byte number and then use the slot to actually
2429  * find the item.
2430  *
2431  * struct refsort is smaller than struct btrfs_item and smaller than
2432  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2433  * for a btree block, there's no way for a kmalloc of refsorts for a
2434  * single node to be bigger than a page.
2435  */
2436 struct refsort {
2437         u64 bytenr;
2438         u32 slot;
2439 };
2440
2441 /*
2442  * for passing into sort()
2443  */
2444 static int refsort_cmp(const void *a_void, const void *b_void)
2445 {
2446         const struct refsort *a = a_void;
2447         const struct refsort *b = b_void;
2448
2449         if (a->bytenr < b->bytenr)
2450                 return -1;
2451         if (a->bytenr > b->bytenr)
2452                 return 1;
2453         return 0;
2454 }
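/*
 * Editor's sketch (illustrative; process_slot is a made-up name and the
 * surrounding block is under #if 0): the intended use is one refsort
 * per pointer, sorted by bytenr, then a walk of the sorted slots:
 *
 *	sort(sorted, nritems, sizeof(struct refsort), refsort_cmp, NULL);
 *	for (i = 0; i < nritems; i++)
 *		process_slot(buf, sorted[i].slot);
 */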
2455 #endif
2456
2457 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2458                            struct btrfs_root *root,
2459                            struct extent_buffer *buf,
2460                            int full_backref, int inc)
2461 {
2462         u64 bytenr;
2463         u64 num_bytes;
2464         u64 parent;
2465         u64 ref_root;
2466         u32 nritems;
2467         struct btrfs_key key;
2468         struct btrfs_file_extent_item *fi;
2469         int i;
2470         int level;
2471         int ret = 0;
2472         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2473                             u64, u64, u64, u64, u64, u64);
2474
2475         ref_root = btrfs_header_owner(buf);
2476         nritems = btrfs_header_nritems(buf);
2477         level = btrfs_header_level(buf);
2478
2479         if (!root->ref_cows && level == 0)
2480                 return 0;
2481
2482         if (inc)
2483                 process_func = btrfs_inc_extent_ref;
2484         else
2485                 process_func = btrfs_free_extent;
2486
2487         if (full_backref)
2488                 parent = buf->start;
2489         else
2490                 parent = 0;
2491
2492         for (i = 0; i < nritems; i++) {
2493                 if (level == 0) {
2494                         btrfs_item_key_to_cpu(buf, &key, i);
2495                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2496                                 continue;
2497                         fi = btrfs_item_ptr(buf, i,
2498                                             struct btrfs_file_extent_item);
2499                         if (btrfs_file_extent_type(buf, fi) ==
2500                             BTRFS_FILE_EXTENT_INLINE)
2501                                 continue;
2502                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2503                         if (bytenr == 0)
2504                                 continue;
2505
2506                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2507                         key.offset -= btrfs_file_extent_offset(buf, fi);
2508                         ret = process_func(trans, root, bytenr, num_bytes,
2509                                            parent, ref_root, key.objectid,
2510                                            key.offset);
2511                         if (ret)
2512                                 goto fail;
2513                 } else {
2514                         bytenr = btrfs_node_blockptr(buf, i);
2515                         num_bytes = btrfs_level_size(root, level - 1);
2516                         ret = process_func(trans, root, bytenr, num_bytes,
2517                                            parent, ref_root, level - 1, 0);
2518                         if (ret)
2519                                 goto fail;
2520                 }
2521         }
2522         return 0;
2523 fail:
2524         BUG();
2525         return ret;
2526 }
2527
2528 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2529                   struct extent_buffer *buf, int full_backref)
2530 {
2531         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2532 }
2533
2534 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2535                   struct extent_buffer *buf, int full_backref)
2536 {
2537         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2538 }
2539
2540 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2541                                  struct btrfs_root *root,
2542                                  struct btrfs_path *path,
2543                                  struct btrfs_block_group_cache *cache)
2544 {
2545         int ret;
2546         struct btrfs_root *extent_root = root->fs_info->extent_root;
2547         unsigned long bi;
2548         struct extent_buffer *leaf;
2549
2550         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2551         if (ret < 0)
2552                 goto fail;
2553         BUG_ON(ret);
2554
2555         leaf = path->nodes[0];
2556         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2557         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2558         btrfs_mark_buffer_dirty(leaf);
2559         btrfs_release_path(extent_root, path);
2560 fail:
2561         if (ret)
2562                 return ret;
2563         return 0;
2565 }
2566
2567 static struct btrfs_block_group_cache *
2568 next_block_group(struct btrfs_root *root,
2569                  struct btrfs_block_group_cache *cache)
2570 {
2571         struct rb_node *node;
2572         spin_lock(&root->fs_info->block_group_cache_lock);
2573         node = rb_next(&cache->cache_node);
2574         btrfs_put_block_group(cache);
2575         if (node) {
2576                 cache = rb_entry(node, struct btrfs_block_group_cache,
2577                                  cache_node);
2578                 atomic_inc(&cache->count);
2579         } else
2580                 cache = NULL;
2581         spin_unlock(&root->fs_info->block_group_cache_lock);
2582         return cache;
2583 }
2584
2585 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2586                                    struct btrfs_root *root)
2587 {
2588         struct btrfs_block_group_cache *cache;
2589         int err = 0;
2590         struct btrfs_path *path;
2591         u64 last = 0;
2592
2593         path = btrfs_alloc_path();
2594         if (!path)
2595                 return -ENOMEM;
2596
2597         while (1) {
2598                 if (last == 0) {
2599                         err = btrfs_run_delayed_refs(trans, root,
2600                                                      (unsigned long)-1);
2601                         BUG_ON(err);
2602                 }
2603
2604                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2605                 while (cache) {
2606                         if (cache->dirty)
2607                                 break;
2608                         cache = next_block_group(root, cache);
2609                 }
2610                 if (!cache) {
2611                         if (last == 0)
2612                                 break;
2613                         last = 0;
2614                         continue;
2615                 }
2616
2617                 cache->dirty = 0;
2618                 last = cache->key.objectid + cache->key.offset;
2619
2620                 err = write_one_cache_group(trans, root, path, cache);
2621                 BUG_ON(err);
2622                 btrfs_put_block_group(cache);
2623         }
2624
2625         btrfs_free_path(path);
2626         return 0;
2627 }
2628
2629 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2630 {
2631         struct btrfs_block_group_cache *block_group;
2632         int readonly = 0;
2633
2634         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2635         if (!block_group || block_group->ro)
2636                 readonly = 1;
2637         if (block_group)
2638                 btrfs_put_block_group(block_group);
2639         return readonly;
2640 }
2641
2642 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2643                              u64 total_bytes, u64 bytes_used,
2644                              struct btrfs_space_info **space_info)
2645 {
2646         struct btrfs_space_info *found;
2647
2648         found = __find_space_info(info, flags);
2649         if (found) {
2650                 spin_lock(&found->lock);
2651                 found->total_bytes += total_bytes;
2652                 found->bytes_used += bytes_used;
2653                 found->full = 0;
2654                 spin_unlock(&found->lock);
2655                 *space_info = found;
2656                 return 0;
2657         }
2658         found = kzalloc(sizeof(*found), GFP_NOFS);
2659         if (!found)
2660                 return -ENOMEM;
2661
2662         INIT_LIST_HEAD(&found->block_groups);
2663         init_rwsem(&found->groups_sem);
2664         spin_lock_init(&found->lock);
2665         found->flags = flags;
2666         found->total_bytes = total_bytes;
2667         found->bytes_used = bytes_used;
2668         found->bytes_pinned = 0;
2669         found->bytes_reserved = 0;
2670         found->bytes_readonly = 0;
2671         found->bytes_delalloc = 0;
2672         found->full = 0;
2673         found->force_alloc = 0;
2674         *space_info = found;
2675         list_add_rcu(&found->list, &info->space_info);
2676         atomic_set(&found->caching_threads, 0);
2677         return 0;
2678 }
2679
2680 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2681 {
2682         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2683                                    BTRFS_BLOCK_GROUP_RAID1 |
2684                                    BTRFS_BLOCK_GROUP_RAID10 |
2685                                    BTRFS_BLOCK_GROUP_DUP);
2686         if (extra_flags) {
2687                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2688                         fs_info->avail_data_alloc_bits |= extra_flags;
2689                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2690                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2691                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2692                         fs_info->avail_system_alloc_bits |= extra_flags;
2693         }
2694 }
2695
2696 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2697 {
2698         spin_lock(&cache->space_info->lock);
2699         spin_lock(&cache->lock);
2700         if (!cache->ro) {
2701                 cache->space_info->bytes_readonly += cache->key.offset -
2702                                         btrfs_block_group_used(&cache->item);
2703                 cache->ro = 1;
2704         }
2705         spin_unlock(&cache->lock);
2706         spin_unlock(&cache->space_info->lock);
2707 }
2708
2709 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2710 {
2711         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2712
2713         if (num_devices == 1)
2714                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2715         if (num_devices < 4)
2716                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2717
2718         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2719             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2720                       BTRFS_BLOCK_GROUP_RAID10))) {
2721                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2722         }
2723
2724         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2725             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2726                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2727         }
2728
2729         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2730             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2731              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2732              (flags & BTRFS_BLOCK_GROUP_DUP)))
2733                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2734         return flags;
2735 }
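
/*
 * A quick worked example of the reduction above (assuming a two-device
 * filesystem, so num_devices == 2):
 *
 *   u64 flags = BTRFS_BLOCK_GROUP_METADATA |
 *               BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1;
 *   flags = btrfs_reduce_alloc_profile(root, flags);
 *   // RAID10 needs at least 4 devices and DUP loses to RAID1, so this
 *   // reduces to BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1
 */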
2736
2737 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2738 {
2739         struct btrfs_fs_info *info = root->fs_info;
2740         u64 alloc_profile;
2741
2742         if (data) {
2743                 alloc_profile = info->avail_data_alloc_bits &
2744                         info->data_alloc_profile;
2745                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2746         } else if (root == root->fs_info->chunk_root) {
2747                 alloc_profile = info->avail_system_alloc_bits &
2748                         info->system_alloc_profile;
2749                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2750         } else {
2751                 alloc_profile = info->avail_metadata_alloc_bits &
2752                         info->metadata_alloc_profile;
2753                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2754         }
2755
2756         return btrfs_reduce_alloc_profile(root, data);
2757 }
2758
2759 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2760 {
2761         u64 alloc_target;
2762
2763         alloc_target = btrfs_get_alloc_profile(root, 1);
2764         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2765                                                        alloc_target);
2766 }
2767
2768 /*
2769  * for now this just makes sure we have at least 5% of our metadata space
2770  * free for use (20% while the metadata space info is not yet full).
2771  */
2772 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2773 {
2774         struct btrfs_fs_info *info = root->fs_info;
2775         struct btrfs_space_info *meta_sinfo;
2776         u64 alloc_target, thresh;
2777         int committed = 0, ret;
2778
2779         /* get the space info for where the metadata will live */
2780         alloc_target = btrfs_get_alloc_profile(root, 0);
2781         meta_sinfo = __find_space_info(info, alloc_target);
2782         if (!meta_sinfo)
2783                 goto alloc;
2784
2785 again:
2786         spin_lock(&meta_sinfo->lock);
2787         if (!meta_sinfo->full)
2788                 thresh = meta_sinfo->total_bytes * 80;
2789         else
2790                 thresh = meta_sinfo->total_bytes * 95;
2791
2792         do_div(thresh, 100);
2793
2794         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2795             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2796             meta_sinfo->bytes_super > thresh) {
2797                 struct btrfs_trans_handle *trans;
2798                 if (!meta_sinfo->full) {
2799                         meta_sinfo->force_alloc = 1;
2800                         spin_unlock(&meta_sinfo->lock);
2801 alloc:
2802                         trans = btrfs_start_transaction(root, 1);
2803                         if (!trans)
2804                                 return -ENOMEM;
2805
2806                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2807                                              2 * 1024 * 1024, alloc_target, 0);
2808                         btrfs_end_transaction(trans, root);
2809                         if (!meta_sinfo) {
2810                                 meta_sinfo = __find_space_info(info,
2811                                                                alloc_target);
2812                         }
2813                         goto again;
2814                 }
2815                 spin_unlock(&meta_sinfo->lock);
2816
2817                 if (!committed) {
2818                         committed = 1;
2819                         trans = btrfs_join_transaction(root, 1);
2820                         if (!trans)
2821                                 return -ENOMEM;
2822                         ret = btrfs_commit_transaction(trans, root);
2823                         if (ret)
2824                                 return ret;
2825                         goto again;
2826                 }
2827                 return -ENOSPC;
2828         }
2829         spin_unlock(&meta_sinfo->lock);
2830
2831         return 0;
2832 }
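
/*
 * Worked example of the threshold math above (assumed numbers): with
 * 10 GiB of metadata space and the space info not yet full,
 *
 *   thresh = (10 GiB * 80) / 100 = 8 GiB
 *
 * so once used + reserved + pinned + readonly + super crosses 8 GiB we
 * force a ~2 MiB chunk allocation and retry; once the space info is
 * full, the limit relaxes to 95% and we fall back to committing the
 * transaction before returning -ENOSPC.
 */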
2833
2834 /*
2835  * This will check the space that the inode allocates from to make sure we have
2836  * enough space for bytes.
2837  */
2838 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2839                                 u64 bytes)
2840 {
2841         struct btrfs_space_info *data_sinfo;
2842         int ret = 0, committed = 0;
2843
2844         /* make sure bytes are sectorsize aligned */
2845         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2846
2847         data_sinfo = BTRFS_I(inode)->space_info;
2848         if (!data_sinfo)
2849                 goto alloc;
2850
2851 again:
2852         /* make sure we have enough space to handle the data first */
2853         spin_lock(&data_sinfo->lock);
2854         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2855             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2856             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2857             data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
2858                 struct btrfs_trans_handle *trans;
2859
2860                 /*
2861                  * if we don't have enough free bytes in this space then we need
2862                  * to alloc a new chunk.
2863                  */
2864                 if (!data_sinfo->full) {
2865                         u64 alloc_target;
2866
2867                         data_sinfo->force_alloc = 1;
2868                         spin_unlock(&data_sinfo->lock);
2869 alloc:
2870                         alloc_target = btrfs_get_alloc_profile(root, 1);
2871                         trans = btrfs_start_transaction(root, 1);
2872                         if (!trans)
2873                                 return -ENOMEM;
2874
2875                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2876                                              bytes + 2 * 1024 * 1024,
2877                                              alloc_target, 0);
2878                         btrfs_end_transaction(trans, root);
2879                         if (ret)
2880                                 return ret;
2881
2882                         if (!data_sinfo) {
2883                                 btrfs_set_inode_space_info(root, inode);
2884                                 data_sinfo = BTRFS_I(inode)->space_info;
2885                         }
2886                         goto again;
2887                 }
2888                 spin_unlock(&data_sinfo->lock);
2889
2890                 /* commit the current transaction and try again */
2891                 if (!committed) {
2892                         committed = 1;
2893                         trans = btrfs_join_transaction(root, 1);
2894                         if (!trans)
2895                                 return -ENOMEM;
2896                         ret = btrfs_commit_transaction(trans, root);
2897                         if (ret)
2898                                 return ret;
2899                         goto again;
2900                 }
2901
2902                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2903                        ", %llu bytes_used, %llu bytes_reserved, "
2904                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2905                        "%llu total\n", (unsigned long long)bytes,
2906                        (unsigned long long)data_sinfo->bytes_delalloc,
2907                        (unsigned long long)data_sinfo->bytes_used,
2908                        (unsigned long long)data_sinfo->bytes_reserved,
2909                        (unsigned long long)data_sinfo->bytes_pinned,
2910                        (unsigned long long)data_sinfo->bytes_readonly,
2911                        (unsigned long long)data_sinfo->bytes_may_use,
2912                        (unsigned long long)data_sinfo->total_bytes);
2913                 return -ENOSPC;
2914         }
2915         data_sinfo->bytes_may_use += bytes;
2916         BTRFS_I(inode)->reserved_bytes += bytes;
2917         spin_unlock(&data_sinfo->lock);
2918
2919         return btrfs_check_metadata_free_space(root);
2920 }
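
/*
 * The sectorsize round-up above works like this (assuming 4 KiB
 * sectors): bytes = 6000 becomes (6000 + 4095) & ~4095 = 8192. A
 * hypothetical caller reserves before dirtying pages and undoes the
 * reservation on failure:
 *
 *   ret = btrfs_check_data_free_space(root, inode, write_bytes);
 *   if (ret)
 *       return ret;
 *   ret = do_the_write(...);                // hypothetical helper
 *   if (ret)
 *       btrfs_free_reserved_data_space(root, inode, write_bytes);
 */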
2921
2922 /*
2923  * if there was an error for whatever reason after calling
2924  * btrfs_check_data_free_space, call this so we can clean up the counters.
2925  */
2926 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2927                                     struct inode *inode, u64 bytes)
2928 {
2929         struct btrfs_space_info *data_sinfo;
2930
2931         /* make sure bytes are sectorsize aligned */
2932         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2933
2934         data_sinfo = BTRFS_I(inode)->space_info;
2935         spin_lock(&data_sinfo->lock);
2936         data_sinfo->bytes_may_use -= bytes;
2937         BTRFS_I(inode)->reserved_bytes -= bytes;
2938         spin_unlock(&data_sinfo->lock);
2939 }
2940
2941 /* called when we are adding a delalloc extent to the inode's io_tree */
2942 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2943                                   u64 bytes)
2944 {
2945         struct btrfs_space_info *data_sinfo;
2946
2947         /* get the space info for where this inode will be storing its data */
2948         data_sinfo = BTRFS_I(inode)->space_info;
2949
2950         /* make sure we have enough space to handle the data first */
2951         spin_lock(&data_sinfo->lock);
2952         data_sinfo->bytes_delalloc += bytes;
2953
2954         /*
2955          * we are adding a delalloc extent without calling
2956          * btrfs_check_data_free_space first.  This happens on a weird
2957          * writepage condition, but shouldn't hurt our accounting
2958          */
2959         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2960                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2961                 BTRFS_I(inode)->reserved_bytes = 0;
2962         } else {
2963                 data_sinfo->bytes_may_use -= bytes;
2964                 BTRFS_I(inode)->reserved_bytes -= bytes;
2965         }
2966
2967         spin_unlock(&data_sinfo->lock);
2968 }
2969
2970 /* called when we are clearing a delalloc extent from the inode's io_tree */
2971 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2972                               u64 bytes)
2973 {
2974         struct btrfs_space_info *info;
2975
2976         info = BTRFS_I(inode)->space_info;
2977
2978         spin_lock(&info->lock);
2979         info->bytes_delalloc -= bytes;
2980         spin_unlock(&info->lock);
2981 }
2982
2983 static void force_metadata_allocation(struct btrfs_fs_info *info)
2984 {
2985         struct list_head *head = &info->space_info;
2986         struct btrfs_space_info *found;
2987
2988         rcu_read_lock();
2989         list_for_each_entry_rcu(found, head, list) {
2990                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2991                         found->force_alloc = 1;
2992         }
2993         rcu_read_unlock();
2994 }
2995
2996 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2997                           struct btrfs_root *extent_root, u64 alloc_bytes,
2998                           u64 flags, int force)
2999 {
3000         struct btrfs_space_info *space_info;
3001         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3002         u64 thresh;
3003         int ret = 0;
3004
3005         mutex_lock(&fs_info->chunk_mutex);
3006
3007         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3008
3009         space_info = __find_space_info(extent_root->fs_info, flags);
3010         if (!space_info) {
3011                 ret = update_space_info(extent_root->fs_info, flags,
3012                                         0, 0, &space_info);
3013                 BUG_ON(ret);
3014         }
3015         BUG_ON(!space_info);
3016
3017         spin_lock(&space_info->lock);
3018         if (space_info->force_alloc) {
3019                 force = 1;
3020                 space_info->force_alloc = 0;
3021         }
3022         if (space_info->full) {
3023                 spin_unlock(&space_info->lock);
3024                 goto out;
3025         }
3026
3027         thresh = space_info->total_bytes - space_info->bytes_readonly;
3028         thresh = div_factor(thresh, 6);
3029         if (!force &&
3030            (space_info->bytes_used + space_info->bytes_pinned +
3031             space_info->bytes_reserved + alloc_bytes) < thresh) {
3032                 spin_unlock(&space_info->lock);
3033                 goto out;
3034         }
3035         spin_unlock(&space_info->lock);
3036
3037         /*
3038          * if we're doing a data chunk, go ahead and make sure that
3039          * we keep a reasonable number of metadata chunks allocated in the
3040          * FS as well.
3041          */
3042         if (flags & BTRFS_BLOCK_GROUP_DATA) {
3043                 fs_info->data_chunk_allocations++;
3044                 if (!(fs_info->data_chunk_allocations %
3045                       fs_info->metadata_ratio))
3046                         force_metadata_allocation(fs_info);
3047         }
3048
3049         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3050         if (ret)
3051                 space_info->full = 1;
3052 out:
3053         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3054         return ret;
3055 }
3056
3057 static int update_block_group(struct btrfs_trans_handle *trans,
3058                               struct btrfs_root *root,
3059                               u64 bytenr, u64 num_bytes, int alloc,
3060                               int mark_free)
3061 {
3062         struct btrfs_block_group_cache *cache;
3063         struct btrfs_fs_info *info = root->fs_info;
3064         u64 total = num_bytes;
3065         u64 old_val;
3066         u64 byte_in_group;
3067
3068         /* block accounting for super block */
3069         spin_lock(&info->delalloc_lock);
3070         old_val = btrfs_super_bytes_used(&info->super_copy);
3071         if (alloc)
3072                 old_val += num_bytes;
3073         else
3074                 old_val -= num_bytes;
3075         btrfs_set_super_bytes_used(&info->super_copy, old_val);
3076
3077         /* block accounting for root item */
3078         old_val = btrfs_root_used(&root->root_item);
3079         if (alloc)
3080                 old_val += num_bytes;
3081         else
3082                 old_val -= num_bytes;
3083         btrfs_set_root_used(&root->root_item, old_val);
3084         spin_unlock(&info->delalloc_lock);
3085
3086         while (total) {
3087                 cache = btrfs_lookup_block_group(info, bytenr);
3088                 if (!cache)
3089                         return -1;
3090                 byte_in_group = bytenr - cache->key.objectid;
3091                 WARN_ON(byte_in_group > cache->key.offset);
3092
3093                 spin_lock(&cache->space_info->lock);
3094                 spin_lock(&cache->lock);
3095                 cache->dirty = 1;
3096                 old_val = btrfs_block_group_used(&cache->item);
3097                 num_bytes = min(total, cache->key.offset - byte_in_group);
3098                 if (alloc) {
3099                         old_val += num_bytes;
3100                         btrfs_set_block_group_used(&cache->item, old_val);
3101                         cache->reserved -= num_bytes;
3102                         cache->space_info->bytes_used += num_bytes;
3103                         cache->space_info->bytes_reserved -= num_bytes;
3104                         if (cache->ro)
3105                                 cache->space_info->bytes_readonly -= num_bytes;
3106                         spin_unlock(&cache->lock);
3107                         spin_unlock(&cache->space_info->lock);
3108                 } else {
3109                         old_val -= num_bytes;
3110                         cache->space_info->bytes_used -= num_bytes;
3111                         if (cache->ro)
3112                                 cache->space_info->bytes_readonly += num_bytes;
3113                         btrfs_set_block_group_used(&cache->item, old_val);
3114                         spin_unlock(&cache->lock);
3115                         spin_unlock(&cache->space_info->lock);
3116                         if (mark_free) {
3117                                 int ret;
3118
3119                                 ret = btrfs_discard_extent(root, bytenr,
3120                                                            num_bytes);
3121                                 WARN_ON(ret);
3122
3123                                 ret = btrfs_add_free_space(cache, bytenr,
3124                                                            num_bytes);
3125                                 WARN_ON(ret);
3126                         }
3127                 }
3128                 btrfs_put_block_group(cache);
3129                 total -= num_bytes;
3130                 bytenr += num_bytes;
3131         }
3132         return 0;
3133 }
3134
3135 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3136 {
3137         struct btrfs_block_group_cache *cache;
3138         u64 bytenr;
3139
3140         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3141         if (!cache)
3142                 return 0;
3143
3144         bytenr = cache->key.objectid;
3145         btrfs_put_block_group(cache);
3146
3147         return bytenr;
3148 }
3149
3150 /*
3151  * this function must be called within a transaction
3152  */
3153 int btrfs_pin_extent(struct btrfs_root *root,
3154                      u64 bytenr, u64 num_bytes, int reserved)
3155 {
3156         struct btrfs_fs_info *fs_info = root->fs_info;
3157         struct btrfs_block_group_cache *cache;
3158
3159         cache = btrfs_lookup_block_group(fs_info, bytenr);
3160         BUG_ON(!cache);
3161
3162         spin_lock(&cache->space_info->lock);
3163         spin_lock(&cache->lock);
3164         cache->pinned += num_bytes;
3165         cache->space_info->bytes_pinned += num_bytes;
3166         if (reserved) {
3167                 cache->reserved -= num_bytes;
3168                 cache->space_info->bytes_reserved -= num_bytes;
3169         }
3170         spin_unlock(&cache->lock);
3171         spin_unlock(&cache->space_info->lock);
3172
3173         btrfs_put_block_group(cache);
3174
3175         set_extent_dirty(fs_info->pinned_extents,
3176                          bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3177         return 0;
3178 }
3179
3180 static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3181                                    u64 num_bytes, int reserve)
3182 {
3183         spin_lock(&cache->space_info->lock);
3184         spin_lock(&cache->lock);
3185         if (reserve) {
3186                 cache->reserved += num_bytes;
3187                 cache->space_info->bytes_reserved += num_bytes;
3188         } else {
3189                 cache->reserved -= num_bytes;
3190                 cache->space_info->bytes_reserved -= num_bytes;
3191         }
3192         spin_unlock(&cache->lock);
3193         spin_unlock(&cache->space_info->lock);
3194         return 0;
3195 }
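
/*
 * Sketch of the expected pairing (illustrative): find_free_extent()
 * below reserves the space it hands out, and the reservation is dropped
 * again once the extent is pinned or accounted as used by
 * update_block_group():
 *
 *   update_reserved_extents(cache, num_bytes, 1);   // reserve
 *   ...
 *   update_reserved_extents(cache, num_bytes, 0);   // unreserve
 */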
3196
3197 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3198                                 struct btrfs_root *root)
3199 {
3200         struct btrfs_fs_info *fs_info = root->fs_info;
3201         struct btrfs_caching_control *next;
3202         struct btrfs_caching_control *caching_ctl;
3203         struct btrfs_block_group_cache *cache;
3204
3205         down_write(&fs_info->extent_commit_sem);
3206
3207         list_for_each_entry_safe(caching_ctl, next,
3208                                  &fs_info->caching_block_groups, list) {
3209                 cache = caching_ctl->block_group;
3210                 if (block_group_cache_done(cache)) {
3211                         cache->last_byte_to_unpin = (u64)-1;
3212                         list_del_init(&caching_ctl->list);
3213                         put_caching_control(caching_ctl);
3214                 } else {
3215                         cache->last_byte_to_unpin = caching_ctl->progress;
3216                 }
3217         }
3218
3219         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3220                 fs_info->pinned_extents = &fs_info->freed_extents[1];
3221         else
3222                 fs_info->pinned_extents = &fs_info->freed_extents[0];
3223
3224         up_write(&fs_info->extent_commit_sem);
3225         return 0;
3226 }
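
/*
 * The freed_extents[] swap above double-buffers the pinned ranges: new
 * frees during the next transaction land in the tree that just became
 * fs_info->pinned_extents, while btrfs_finish_extent_commit() below
 * walks the other tree and releases everything pinned by the
 * transaction that is committing.
 */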
3227
3228 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3229 {
3230         struct btrfs_fs_info *fs_info = root->fs_info;
3231         struct btrfs_block_group_cache *cache = NULL;
3232         u64 len;
3233
3234         while (start <= end) {
3235                 if (!cache ||
3236                     start >= cache->key.objectid + cache->key.offset) {
3237                         if (cache)
3238                                 btrfs_put_block_group(cache);
3239                         cache = btrfs_lookup_block_group(fs_info, start);
3240                         BUG_ON(!cache);
3241                 }
3242
3243                 len = cache->key.objectid + cache->key.offset - start;
3244                 len = min(len, end + 1 - start);
3245
3246                 if (start < cache->last_byte_to_unpin) {
3247                         len = min(len, cache->last_byte_to_unpin - start);
3248                         btrfs_add_free_space(cache, start, len);
3249                 }
3250
3251                 spin_lock(&cache->space_info->lock);
3252                 spin_lock(&cache->lock);
3253                 cache->pinned -= len;
3254                 cache->space_info->bytes_pinned -= len;
3255                 spin_unlock(&cache->lock);
3256                 spin_unlock(&cache->space_info->lock);
3257
3258                 start += len;
3259         }
3260
3261         if (cache)
3262                 btrfs_put_block_group(cache);
3263         return 0;
3264 }
3265
3266 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3267                                struct btrfs_root *root)
3268 {
3269         struct btrfs_fs_info *fs_info = root->fs_info;
3270         struct extent_io_tree *unpin;
3271         u64 start;
3272         u64 end;
3273         int ret;
3274
3275         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3276                 unpin = &fs_info->freed_extents[1];
3277         else
3278                 unpin = &fs_info->freed_extents[0];
3279
3280         while (1) {
3281                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3282                                             EXTENT_DIRTY);
3283                 if (ret)
3284                         break;
3285
3286                 ret = btrfs_discard_extent(root, start, end + 1 - start);
3287
3288                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3289                 unpin_extent_range(root, start, end);
3290                 cond_resched();
3291         }
3292
3293         return ret;
3294 }
3295
3296 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3297                           struct btrfs_root *root,
3298                           struct btrfs_path *path,
3299                           u64 bytenr, u64 num_bytes,
3300                           int is_data, int reserved,
3301                           struct extent_buffer **must_clean)
3302 {
3303         int err = 0;
3304         struct extent_buffer *buf;
3305
3306         if (is_data)
3307                 goto pinit;
3308
3309         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3310         if (!buf)
3311                 goto pinit;
3312
3313         /* we can reuse a block if it hasn't been written
3314          * and it is from this transaction.  We can't
3315          * reuse anything from the tree log root because
3316          * it has tiny sub-transactions.
3317          */
3318         if (btrfs_buffer_uptodate(buf, 0) &&
3319             btrfs_try_tree_lock(buf)) {
3320                 u64 header_owner = btrfs_header_owner(buf);
3321                 u64 header_transid = btrfs_header_generation(buf);
3322                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3323                     header_transid == trans->transid &&
3324                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3325                         *must_clean = buf;
3326                         return 1;
3327                 }
3328                 btrfs_tree_unlock(buf);
3329         }
3330         free_extent_buffer(buf);
3331 pinit:
3332         if (path)
3333                 btrfs_set_path_blocking(path);
3334         /* unlocks the pinned mutex */
3335         btrfs_pin_extent(root, bytenr, num_bytes, reserved);
3336
3337         BUG_ON(err < 0);
3338         return 0;
3339 }
3340
3341 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3342                                 struct btrfs_root *root,
3343                                 u64 bytenr, u64 num_bytes, u64 parent,
3344                                 u64 root_objectid, u64 owner_objectid,
3345                                 u64 owner_offset, int refs_to_drop,
3346                                 struct btrfs_delayed_extent_op *extent_op)
3347 {
3348         struct btrfs_key key;
3349         struct btrfs_path *path;
3350         struct btrfs_fs_info *info = root->fs_info;
3351         struct btrfs_root *extent_root = info->extent_root;
3352         struct extent_buffer *leaf;
3353         struct btrfs_extent_item *ei;
3354         struct btrfs_extent_inline_ref *iref;
3355         int ret;
3356         int is_data;
3357         int extent_slot = 0;
3358         int found_extent = 0;
3359         int num_to_del = 1;
3360         u32 item_size;
3361         u64 refs;
3362
3363         path = btrfs_alloc_path();
3364         if (!path)
3365                 return -ENOMEM;
3366
3367         path->reada = 1;
3368         path->leave_spinning = 1;
3369
3370         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3371         BUG_ON(!is_data && refs_to_drop != 1);
3372
3373         ret = lookup_extent_backref(trans, extent_root, path, &iref,
3374                                     bytenr, num_bytes, parent,
3375                                     root_objectid, owner_objectid,
3376                                     owner_offset);
3377         if (ret == 0) {
3378                 extent_slot = path->slots[0];
3379                 while (extent_slot >= 0) {
3380                         btrfs_item_key_to_cpu(path->nodes[0], &key,
3381                                               extent_slot);
3382                         if (key.objectid != bytenr)
3383                                 break;
3384                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3385                             key.offset == num_bytes) {
3386                                 found_extent = 1;
3387                                 break;
3388                         }
3389                         if (path->slots[0] - extent_slot > 5)
3390                                 break;
3391                         extent_slot--;
3392                 }
3393 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3394                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3395                 if (found_extent && item_size < sizeof(*ei))
3396                         found_extent = 0;
3397 #endif
3398                 if (!found_extent) {
3399                         BUG_ON(iref);
3400                         ret = remove_extent_backref(trans, extent_root, path,
3401                                                     NULL, refs_to_drop,
3402                                                     is_data);
3403                         BUG_ON(ret);
3404                         btrfs_release_path(extent_root, path);
3405                         path->leave_spinning = 1;
3406
3407                         key.objectid = bytenr;
3408                         key.type = BTRFS_EXTENT_ITEM_KEY;
3409                         key.offset = num_bytes;
3410
3411                         ret = btrfs_search_slot(trans, extent_root,
3412                                                 &key, path, -1, 1);
3413                         if (ret) {
3414                                 printk(KERN_ERR "umm, got %d back from search"
3415                                        ", was looking for %llu\n", ret,
3416                                        (unsigned long long)bytenr);
3417                                 btrfs_print_leaf(extent_root, path->nodes[0]);
3418                         }
3419                         BUG_ON(ret);
3420                         extent_slot = path->slots[0];
3421                 }
3422         } else {
3423                 btrfs_print_leaf(extent_root, path->nodes[0]);
3424                 WARN_ON(1);
3425                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3426                        "parent %llu root %llu  owner %llu offset %llu\n",
3427                        (unsigned long long)bytenr,
3428                        (unsigned long long)parent,
3429                        (unsigned long long)root_objectid,
3430                        (unsigned long long)owner_objectid,
3431                        (unsigned long long)owner_offset);
3432         }
3433
3434         leaf = path->nodes[0];
3435         item_size = btrfs_item_size_nr(leaf, extent_slot);
3436 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3437         if (item_size < sizeof(*ei)) {
3438                 BUG_ON(found_extent || extent_slot != path->slots[0]);
3439                 ret = convert_extent_item_v0(trans, extent_root, path,
3440                                              owner_objectid, 0);
3441                 BUG_ON(ret < 0);
3442
3443                 btrfs_release_path(extent_root, path);
3444                 path->leave_spinning = 1;
3445
3446                 key.objectid = bytenr;
3447                 key.type = BTRFS_EXTENT_ITEM_KEY;
3448                 key.offset = num_bytes;
3449
3450                 ret = btrfs_search_slot(trans, extent_root, &key, path,
3451                                         -1, 1);
3452                 if (ret) {
3453                         printk(KERN_ERR "umm, got %d back from search"
3454                                ", was looking for %llu\n", ret,
3455                                (unsigned long long)bytenr);
3456                         btrfs_print_leaf(extent_root, path->nodes[0]);
3457                 }
3458                 BUG_ON(ret);
3459                 extent_slot = path->slots[0];
3460                 leaf = path->nodes[0];
3461                 item_size = btrfs_item_size_nr(leaf, extent_slot);
3462         }
3463 #endif
3464         BUG_ON(item_size < sizeof(*ei));
3465         ei = btrfs_item_ptr(leaf, extent_slot,
3466                             struct btrfs_extent_item);
3467         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3468                 struct btrfs_tree_block_info *bi;
3469                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3470                 bi = (struct btrfs_tree_block_info *)(ei + 1);
3471                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3472         }
3473
3474         refs = btrfs_extent_refs(leaf, ei);
3475         BUG_ON(refs < refs_to_drop);
3476         refs -= refs_to_drop;
3477
3478         if (refs > 0) {
3479                 if (extent_op)
3480                         __run_delayed_extent_op(extent_op, leaf, ei);
3481                 /*
3482                  * In the case of inline back ref, reference count will
3483                  * be updated by remove_extent_backref
3484                  */
3485                 if (iref) {
3486                         BUG_ON(!found_extent);
3487                 } else {
3488                         btrfs_set_extent_refs(leaf, ei, refs);
3489                         btrfs_mark_buffer_dirty(leaf);
3490                 }
3491                 if (found_extent) {
3492                         ret = remove_extent_backref(trans, extent_root, path,
3493                                                     iref, refs_to_drop,
3494                                                     is_data);
3495                         BUG_ON(ret);
3496                 }
3497         } else {
3498                 int mark_free = 0;
3499                 struct extent_buffer *must_clean = NULL;
3500
3501                 if (found_extent) {
3502                         BUG_ON(is_data && refs_to_drop !=
3503                                extent_data_ref_count(root, path, iref));
3504                         if (iref) {
3505                                 BUG_ON(path->slots[0] != extent_slot);
3506                         } else {
3507                                 BUG_ON(path->slots[0] != extent_slot + 1);
3508                                 path->slots[0] = extent_slot;
3509                                 num_to_del = 2;
3510                         }
3511                 }
3512
3513                 ret = pin_down_bytes(trans, root, path, bytenr,
3514                                      num_bytes, is_data, 0, &must_clean);
3515                 if (ret > 0)
3516                         mark_free = 1;
3517                 BUG_ON(ret < 0);
3518                 /*
3519                  * it is going to be very rare for someone to be waiting
3520                  * on the block we're freeing.  del_items might need to
3521                  * schedule, so rather than get fancy, just force it
3522                  * to blocking here
3523                  */
3524                 if (must_clean)
3525                         btrfs_set_lock_blocking(must_clean);
3526
3527                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3528                                       num_to_del);
3529                 BUG_ON(ret);
3530                 btrfs_release_path(extent_root, path);
3531
3532                 if (must_clean) {
3533                         clean_tree_block(NULL, root, must_clean);
3534                         btrfs_tree_unlock(must_clean);
3535                         free_extent_buffer(must_clean);
3536                 }
3537
3538                 if (is_data) {
3539                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3540                         BUG_ON(ret);
3541                 } else {
3542                         invalidate_mapping_pages(info->btree_inode->i_mapping,
3543                              bytenr >> PAGE_CACHE_SHIFT,
3544                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3545                 }
3546
3547                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3548                                          mark_free);
3549                 BUG_ON(ret);
3550         }
3551         btrfs_free_path(path);
3552         return ret;
3553 }
3554
3555 /*
3556  * when we free an extent, it is possible (and likely) that we free the last
3557  * delayed ref for that extent as well.  This searches the delayed ref tree for
3558  * a given extent, and if there are no other delayed refs to be processed, it
3559  * removes it from the tree.
3560  */
3561 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3562                                       struct btrfs_root *root, u64 bytenr)
3563 {
3564         struct btrfs_delayed_ref_head *head;
3565         struct btrfs_delayed_ref_root *delayed_refs;
3566         struct btrfs_delayed_ref_node *ref;
3567         struct rb_node *node;
3568         int ret;
3569
3570         delayed_refs = &trans->transaction->delayed_refs;
3571         spin_lock(&delayed_refs->lock);
3572         head = btrfs_find_delayed_ref_head(trans, bytenr);
3573         if (!head)
3574                 goto out;
3575
3576         node = rb_prev(&head->node.rb_node);
3577         if (!node)
3578                 goto out;
3579
3580         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3581
3582         /* there are still entries for this ref, we can't drop it */
3583         if (ref->bytenr == bytenr)
3584                 goto out;
3585
3586         if (head->extent_op) {
3587                 if (!head->must_insert_reserved)
3588                         goto out;
3589                 kfree(head->extent_op);
3590                 head->extent_op = NULL;
3591         }
3592
3593         /*
3594          * waiting for the lock here would deadlock.  If someone else has it
3595          * locked they are already in the process of dropping it anyway
3596          */
3597         if (!mutex_trylock(&head->mutex))
3598                 goto out;
3599
3600         /*
3601          * at this point we have a head with no other entries.  Go
3602          * ahead and process it.
3603          */
3604         head->node.in_tree = 0;
3605         rb_erase(&head->node.rb_node, &delayed_refs->root);
3606
3607         delayed_refs->num_entries--;
3608
3609         /*
3610          * we don't take a ref on the node because we're removing it from the
3611          * tree, so we just steal the ref the tree was holding.
3612          */
3613         delayed_refs->num_heads--;
3614         if (list_empty(&head->cluster))
3615                 delayed_refs->num_heads_ready--;
3616
3617         list_del_init(&head->cluster);
3618         spin_unlock(&delayed_refs->lock);
3619
3620         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3621                                   &head->node, head->extent_op,
3622                                   head->must_insert_reserved);
3623         BUG_ON(ret);
3624         btrfs_put_delayed_ref(&head->node);
3625         return 0;
3626 out:
3627         spin_unlock(&delayed_refs->lock);
3628         return 0;
3629 }
3630
3631 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3632                       struct btrfs_root *root,
3633                       u64 bytenr, u64 num_bytes, u64 parent,
3634                       u64 root_objectid, u64 owner, u64 offset)
3635 {
3636         int ret;
3637
3638         /*
3639          * tree log blocks never actually go into the extent allocation
3640          * tree, just update pinning info and exit early.
3641          */
3642         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3643                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3644                 /* unlocks the pinned mutex */
3645                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
3646                 ret = 0;
3647         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3648                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3649                                         parent, root_objectid, (int)owner,
3650                                         BTRFS_DROP_DELAYED_REF, NULL);
3651                 BUG_ON(ret);
3652                 ret = check_ref_cleanup(trans, root, bytenr);
3653                 BUG_ON(ret);
3654         } else {
3655                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3656                                         parent, root_objectid, owner,
3657                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
3658                 BUG_ON(ret);
3659         }
3660         return ret;
3661 }
3662
3663 static u64 stripe_align(struct btrfs_root *root, u64 val)
3664 {
3665         u64 mask = ((u64)root->stripesize - 1);
3666         u64 ret = (val + mask) & ~mask;
3667         return ret;
3668 }
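
/*
 * Example of the alignment math (assuming a 64 KiB stripesize):
 *
 *   stripe_align(root, 100000) == (100000 + 65535) & ~65535 == 131072
 *
 * i.e. values round up to the next stripesize boundary.
 */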
3669
3670 /*
3671  * when we wait for progress in the block group caching, it's because
3672  * our allocation attempt failed at least once.  So, we must sleep
3673  * and let some progress happen before we try again.
3674  *
3675  * This function will sleep at least once waiting for new free space to
3676  * show up, and then it will check the block group free space numbers
3677  * for our min num_bytes.  Another option is to have it go ahead
3678  * and look in the rbtree for a free extent of a given size, but this
3679  * is a good start.
3680  */
3681 static noinline int
3682 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3683                                 u64 num_bytes)
3684 {
3685         struct btrfs_caching_control *caching_ctl;
3686         DEFINE_WAIT(wait);
3687
3688         caching_ctl = get_caching_control(cache);
3689         if (!caching_ctl)
3690                 return 0;
3691
3692         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
3693                    (cache->free_space >= num_bytes));
3694
3695         put_caching_control(caching_ctl);
3696         return 0;
3697 }
3698
3699 static noinline int
3700 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
3701 {
3702         struct btrfs_caching_control *caching_ctl;
3703         DEFINE_WAIT(wait);
3704
3705         caching_ctl = get_caching_control(cache);
3706         if (!caching_ctl)
3707                 return 0;
3708
3709         wait_event(caching_ctl->wait, block_group_cache_done(cache));
3710
3711         put_caching_control(caching_ctl);
3712         return 0;
3713 }
3714
3715 enum btrfs_loop_type {
3716         LOOP_CACHED_ONLY = 0,
3717         LOOP_CACHING_NOWAIT = 1,
3718         LOOP_CACHING_WAIT = 2,
3719         LOOP_ALLOC_CHUNK = 3,
3720         LOOP_NO_EMPTY_SIZE = 4,
3721 };
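
/*
 * find_free_extent() below starts at LOOP_CACHED_ONLY and bumps the
 * loop level each time a full pass over the block groups fails, so the
 * search gets progressively more expensive (wait for caching, allocate
 * a chunk, finally drop empty_size/empty_cluster) before giving up
 * with -ENOSPC.
 */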
3722
3723 /*
3724  * walks the btree of allocated extents and finds a hole of a given size.
3725  * The key ins is changed to record the hole:
3726  * ins->objectid == block start
3727  * ins->flags = BTRFS_EXTENT_ITEM_KEY
3728  * ins->offset == number of blocks
3729  * Any available blocks before search_start are skipped.
3730  */
3731 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3732                                      struct btrfs_root *orig_root,
3733                                      u64 num_bytes, u64 empty_size,
3734                                      u64 search_start, u64 search_end,
3735                                      u64 hint_byte, struct btrfs_key *ins,
3736                                      u64 exclude_start, u64 exclude_nr,
3737                                      int data)
3738 {
3739         int ret = 0;
3740         struct btrfs_root *root = orig_root->fs_info->extent_root;
3741         struct btrfs_free_cluster *last_ptr = NULL;
3742         struct btrfs_block_group_cache *block_group = NULL;
3743         int empty_cluster = 2 * 1024 * 1024;
3744         int allowed_chunk_alloc = 0;
3745         struct btrfs_space_info *space_info;
3746         int last_ptr_loop = 0;
3747         int loop = 0;
3748         bool found_uncached_bg = false;
3749         bool failed_cluster_refill = false;
3750
3751         WARN_ON(num_bytes < root->sectorsize);
3752         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
3753         ins->objectid = 0;
3754         ins->offset = 0;
3755
3756         space_info = __find_space_info(root->fs_info, data);
3757
3758         if (orig_root->ref_cows || empty_size)
3759                 allowed_chunk_alloc = 1;
3760
3761         if (data & BTRFS_BLOCK_GROUP_METADATA) {
3762                 last_ptr = &root->fs_info->meta_alloc_cluster;
3763                 if (!btrfs_test_opt(root, SSD))
3764                         empty_cluster = 64 * 1024;
3765         }
3766
3767         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
3768                 last_ptr = &root->fs_info->data_alloc_cluster;
3769         }
3770
3771         if (last_ptr) {
3772                 spin_lock(&last_ptr->lock);
3773                 if (last_ptr->block_group)
3774                         hint_byte = last_ptr->window_start;
3775                 spin_unlock(&last_ptr->lock);
3776         }
3777
3778         search_start = max(search_start, first_logical_byte(root, 0));
3779         search_start = max(search_start, hint_byte);
3780
3781         if (!last_ptr)
3782                 empty_cluster = 0;
3783
3784         if (search_start == hint_byte) {
3785                 block_group = btrfs_lookup_block_group(root->fs_info,
3786                                                        search_start);
3787                 /*
3788                  * we don't want to use the block group if it doesn't match our
3789                  * allocation bits, or if it's not cached.
3790                  */
3791                 if (block_group && block_group_bits(block_group, data) &&
3792                     block_group_cache_done(block_group)) {
3793                         down_read(&space_info->groups_sem);
3794                         if (list_empty(&block_group->list) ||
3795                             block_group->ro) {
3796                                 /*
3797                                  * someone is removing this block group,
3798                                  * we can't jump into the have_block_group
3799                                  * target because our list pointers are not
3800                                  * valid
3801                                  */
3802                                 btrfs_put_block_group(block_group);
3803                                 up_read(&space_info->groups_sem);
3804                         } else
3805                                 goto have_block_group;
3806                 } else if (block_group) {
3807                         btrfs_put_block_group(block_group);
3808                 }
3809         }
3810
3811 search:
3812         down_read(&space_info->groups_sem);
3813         list_for_each_entry(block_group, &space_info->block_groups, list) {
3814                 u64 offset;
3815                 int cached;
3816
3817                 atomic_inc(&block_group->count);
3818                 search_start = block_group->key.objectid;
3819
3820 have_block_group:
3821                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3822                         /*
3823                          * we want to start caching kthreads, but not too many
3824                          * right off the bat so we don't overwhelm the system,
3825                          * so only start them if there are fewer than 2 and we're
3826                          * in the initial allocation phase.
3827                          */
3828                         if (loop > LOOP_CACHING_NOWAIT ||
3829                             atomic_read(&space_info->caching_threads) < 2) {
3830                                 ret = cache_block_group(block_group);
3831                                 BUG_ON(ret);
3832                         }
3833                 }
3834
3835                 cached = block_group_cache_done(block_group);
3836                 if (unlikely(!cached)) {
3837                         found_uncached_bg = true;
3838
3839                         /* if we only want cached bgs, loop */
3840                         if (loop == LOOP_CACHED_ONLY)
3841                                 goto loop;
3842                 }
3843
3844                 if (unlikely(block_group->ro))
3845                         goto loop;
3846
3847                 /*
3848                  * OK, we want to try to use the cluster allocator, so let's look
3849                  * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
3850                  * have tried the cluster allocator plenty of times at this
3851                  * point and not have found anything, so we are likely way too
3852                  * fragmented for the clustering stuff to find anything, so let's
3853                  * just skip it and let the allocator find whatever block it can
3854                  * find
3855                  */
3856                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
3857                         /*
3858                          * the refill lock keeps out other
3859                          * people trying to start a new cluster
3860                          */
3861                         spin_lock(&last_ptr->refill_lock);
3862                         if (last_ptr->block_group &&
3863                             (last_ptr->block_group->ro ||
3864                             !block_group_bits(last_ptr->block_group, data))) {
3865                                 offset = 0;
3866                                 goto refill_cluster;
3867                         }
3868
3869                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
3870                                                  num_bytes, search_start);
3871                         if (offset) {
3872                                 /* we have a block, we're done */
3873                                 spin_unlock(&last_ptr->refill_lock);
3874                                 goto checks;
3875                         }
3876
3877                         spin_lock(&last_ptr->lock);
3878                         /*
3879                          * whoops, this cluster doesn't actually point to
3880                          * this block group.  Get a ref on the block
3881                          * group it does point to and try again
3882                          */
3883                         if (!last_ptr_loop && last_ptr->block_group &&
3884                             last_ptr->block_group != block_group) {
3885
3886                                 btrfs_put_block_group(block_group);
3887                                 block_group = last_ptr->block_group;
3888                                 atomic_inc(&block_group->count);
3889                                 spin_unlock(&last_ptr->lock);
3890                                 spin_unlock(&last_ptr->refill_lock);
3891
3892                                 last_ptr_loop = 1;
3893                                 search_start = block_group->key.objectid;
3894                                 /*
3895                                  * we know this block group is properly
3896                                  * in the list because
3897                                  * btrfs_remove_block_group drops the
3898                                  * cluster before it removes the block
3899                                  * group from the list
3900                                  */
3901                                 goto have_block_group;
3902                         }
3903                         spin_unlock(&last_ptr->lock);
3904 refill_cluster:
3905                         /*
3906                          * this cluster didn't work out, free it and
3907                          * start over
3908                          */
3909                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
3910
3911                         last_ptr_loop = 0;
3912
3913                         /* allocate a cluster in this block group */
3914                         ret = btrfs_find_space_cluster(trans, root,
3915                                                block_group, last_ptr,
3916                                                offset, num_bytes,
3917                                                empty_cluster + empty_size);
3918                         if (ret == 0) {
3919                                 /*
3920                                  * now pull our allocation out of this
3921                                  * cluster
3922                                  */
3923                                 offset = btrfs_alloc_from_cluster(block_group,
3924                                                   last_ptr, num_bytes,
3925                                                   search_start);
3926                                 if (offset) {
3927                                         /* we found one, proceed */
3928                                         spin_unlock(&last_ptr->refill_lock);
3929                                         goto checks;
3930                                 }
3931                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
3932                                    && !failed_cluster_refill) {
3933                                 spin_unlock(&last_ptr->refill_lock);
3934
3935                                 failed_cluster_refill = true;
3936                                 wait_block_group_cache_progress(block_group,
3937                                        num_bytes + empty_cluster + empty_size);
3938                                 goto have_block_group;
3939                         }
3940
3941                         /*
3942                          * at this point we either didn't find a cluster
3943                          * or we weren't able to allocate a block from our
3944                          * cluster.  Free the cluster we've been trying
3945                          * to use, and go to the next block group
3946                          */
3947                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
3948                         spin_unlock(&last_ptr->refill_lock);
3949                         goto loop;
3950                 }
3951
3952                 offset = btrfs_find_space_for_alloc(block_group, search_start,
3953                                                     num_bytes, empty_size);
3954                 if (!offset && (cached || (!cached &&
3955                                            loop == LOOP_CACHING_NOWAIT))) {
3956                         goto loop;
3957                 } else if (!offset && (!cached &&
3958                                        loop > LOOP_CACHING_NOWAIT)) {
3959                         wait_block_group_cache_progress(block_group,
3960                                         num_bytes + empty_size);
3961                         goto have_block_group;
3962                 }
3963 checks:
3964                 search_start = stripe_align(root, offset);
3965                 /* move on to the next group */
3966                 if (search_start + num_bytes >= search_end) {
3967                         btrfs_add_free_space(block_group, offset, num_bytes);
3968                         goto loop;
3969                 }
3970
3971                 /* move on to the next group */
3972                 if (search_start + num_bytes >
3973                     block_group->key.objectid + block_group->key.offset) {
3974                         btrfs_add_free_space(block_group, offset, num_bytes);
3975                         goto loop;
3976                 }
3977
3978                 if (exclude_nr > 0 &&
3979                     (search_start + num_bytes > exclude_start &&
3980                      search_start < exclude_start + exclude_nr)) {
3981                         search_start = exclude_start + exclude_nr;
3982
3983                         btrfs_add_free_space(block_group, offset, num_bytes);
3984                         /*
3985                          * if search_start is still in this block group
3986                          * then we just re-search this block group
3987                          */
3988                         if (search_start >= block_group->key.objectid &&
3989                             search_start < (block_group->key.objectid +
3990                                             block_group->key.offset))
3991                                 goto have_block_group;
3992                         goto loop;
3993                 }
3994
3995                 ins->objectid = search_start;
3996                 ins->offset = num_bytes;
3997
3998                 if (offset < search_start)
3999                         btrfs_add_free_space(block_group, offset,
4000                                              search_start - offset);
4001                 BUG_ON(offset > search_start);
4002
4003                 update_reserved_extents(block_group, num_bytes, 1);
4004
4005                 /* we are all good, let's return */
4006                 break;
4007 loop:
4008                 failed_cluster_refill = false;
4009                 btrfs_put_block_group(block_group);
4010         }
4011         up_read(&space_info->groups_sem);
4012
4013         /* LOOP_CACHED_ONLY, only search fully cached block groups
4014          * LOOP_CACHING_NOWAIT, search partially cached block groups, but
4015          *                      don't wait for them to finish caching
4016          *                      don't wait for them to finish caching
4017          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4018          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4019          *                      again
4020          */
4021         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4022             (found_uncached_bg || empty_size || empty_cluster ||
4023              allowed_chunk_alloc)) {
4024                 if (found_uncached_bg) {
4025                         found_uncached_bg = false;
4026                         if (loop < LOOP_CACHING_WAIT) {
4027                                 loop++;
4028                                 goto search;
4029                         }
4030                 }
4031
4032                 if (loop == LOOP_ALLOC_CHUNK) {
4033                         empty_size = 0;
4034                         empty_cluster = 0;
4035                 }
4036
4037                 if (allowed_chunk_alloc) {
4038                         ret = do_chunk_alloc(trans, root, num_bytes +
4039                                              2 * 1024 * 1024, data, 1);
4040                         allowed_chunk_alloc = 0;
4041                 } else {
4042                         space_info->force_alloc = 1;
4043                 }
4044
4045                 if (loop < LOOP_NO_EMPTY_SIZE) {
4046                         loop++;
4047                         goto search;
4048                 }
4049                 ret = -ENOSPC;
4050         } else if (!ins->objectid) {
4051                 ret = -ENOSPC;
4052         }
4053
4054         /* we found what we needed */
4055         if (ins->objectid) {
4056                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
4057                         trans->block_group = block_group->key.objectid;
4058
4059                 btrfs_put_block_group(block_group);
4060                 ret = 0;
4061         }
4062
4063         return ret;
4064 }
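/*
 * Illustrative sketch, not part of the original file: the retry ladder
 * documented above can be read as a small state machine.  The LOOP_*
 * stages are real; sketch_next_loop_stage() is hypothetical and only
 * demonstrates the escalation order.
 */
#if 0
static int sketch_next_loop_stage(int loop, int found_uncached_bg)
{
	/* an uncached group was skipped: retry, waiting harder each pass */
	if (found_uncached_bg && loop < LOOP_CACHING_WAIT)
		return loop + 1;
	/*
	 * then force a chunk allocation and, at the final stage, retry
	 * with empty_size and empty_cluster zeroed
	 */
	if (loop < LOOP_NO_EMPTY_SIZE)
		return loop + 1;
	return -ENOSPC;		/* nothing helped, give up */
}
#endif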
4065
4066 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
4067 {
4068         struct btrfs_block_group_cache *cache;
4069
4070         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4071                (unsigned long long)(info->total_bytes - info->bytes_used -
4072                                     info->bytes_pinned - info->bytes_reserved),
4073                (info->full) ? "" : "not ");
4074         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4075                " may_use=%llu, used=%llu\n",
4076                (unsigned long long)info->total_bytes,
4077                (unsigned long long)info->bytes_pinned,
4078                (unsigned long long)info->bytes_delalloc,
4079                (unsigned long long)info->bytes_may_use,
4080                (unsigned long long)info->bytes_used);
4081
4082         down_read(&info->groups_sem);
4083         list_for_each_entry(cache, &info->block_groups, list) {
4084                 spin_lock(&cache->lock);
4085                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4086                        "%llu pinned %llu reserved\n",
4087                        (unsigned long long)cache->key.objectid,
4088                        (unsigned long long)cache->key.offset,
4089                        (unsigned long long)btrfs_block_group_used(&cache->item),
4090                        (unsigned long long)cache->pinned,
4091                        (unsigned long long)cache->reserved);
4092                 btrfs_dump_free_space(cache, bytes);
4093                 spin_unlock(&cache->lock);
4094         }
4095         up_read(&info->groups_sem);
4096 }
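/*
 * Hypothetical example of the output produced by dump_space_info(),
 * with made-up numbers, to show how the counters relate (free is
 * total - used - pinned - reserved):
 *
 *   space_info has 1048576 free, is not full
 *   space_info total=10485760, pinned=0, delalloc=0, may_use=0, used=9437184
 *   block group 12582912 has 8388608 bytes, 8126464 used 0 pinned 0 reserved
 */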
4097
4098 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4099                          struct btrfs_root *root,
4100                          u64 num_bytes, u64 min_alloc_size,
4101                          u64 empty_size, u64 hint_byte,
4102                          u64 search_end, struct btrfs_key *ins,
4103                          u64 data)
4104 {
4105         int ret;
4106         u64 search_start = 0;
4107         struct btrfs_fs_info *info = root->fs_info;
4108
4109         data = btrfs_get_alloc_profile(root, data);
4110 again:
4111         /*
4112          * the only place that sets empty_size is btrfs_realloc_node, which
4113          * is not called recursively on allocations
4114          */
4115         if (empty_size || root->ref_cows) {
4116                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
4117                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4118                                      2 * 1024 * 1024,
4119                                      BTRFS_BLOCK_GROUP_METADATA |
4120                                      (info->metadata_alloc_profile &
4121                                       info->avail_metadata_alloc_bits), 0);
4122                 }
4123                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4124                                      num_bytes + 2 * 1024 * 1024, data, 0);
4125         }
4126
4127         WARN_ON(num_bytes < root->sectorsize);
4128         ret = find_free_extent(trans, root, num_bytes, empty_size,
4129                                search_start, search_end, hint_byte, ins,
4130                                trans->alloc_exclude_start,
4131                                trans->alloc_exclude_nr, data);
4132
4133         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4134                 num_bytes = num_bytes >> 1;
4135                 num_bytes = num_bytes & ~(root->sectorsize - 1);
4136                 num_bytes = max(num_bytes, min_alloc_size);
4137                 do_chunk_alloc(trans, root->fs_info->extent_root,
4138                                num_bytes, data, 1);
4139                 goto again;
4140         }
4141         if (ret == -ENOSPC) {
4142                 struct btrfs_space_info *sinfo;
4143
4144                 sinfo = __find_space_info(root->fs_info, data);
4145                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4146                        "wanted %llu\n", (unsigned long long)data,
4147                        (unsigned long long)num_bytes);
4148                 dump_space_info(sinfo, num_bytes);
4149         }
4150
4151         return ret;
4152 }
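/*
 * Illustrative sketch, not part of the original file: the -ENOSPC
 * fallback above halves the request, keeps it sector aligned and never
 * lets it shrink below min_alloc_size.  The same math in isolation:
 */
#if 0
static u64 sketch_shrink_request(u64 num_bytes, u64 min_alloc_size,
				 u64 sectorsize)
{
	num_bytes >>= 1;			/* ask for half as much */
	num_bytes &= ~(sectorsize - 1);		/* stay sector aligned */
	return max(num_bytes, min_alloc_size);	/* but honour the floor */
}
#endif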
4153
4154 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4155 {
4156         struct btrfs_block_group_cache *cache;
4157         int ret = 0;
4158
4159         cache = btrfs_lookup_block_group(root->fs_info, start);
4160         if (!cache) {
4161                 printk(KERN_ERR "Unable to find block group for %llu\n",
4162                        (unsigned long long)start);
4163                 return -ENOSPC;
4164         }
4165
4166         ret = btrfs_discard_extent(root, start, len);
4167
4168         btrfs_add_free_space(cache, start, len);
4169         update_reserved_extents(cache, len, 0);
4170         btrfs_put_block_group(cache);
4171
4172         return ret;
4173 }
4174
4175 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4176                                       struct btrfs_root *root,
4177                                       u64 parent, u64 root_objectid,
4178                                       u64 flags, u64 owner, u64 offset,
4179                                       struct btrfs_key *ins, int ref_mod)
4180 {
4181         int ret;
4182         struct btrfs_fs_info *fs_info = root->fs_info;
4183         struct btrfs_extent_item *extent_item;
4184         struct btrfs_extent_inline_ref *iref;
4185         struct btrfs_path *path;
4186         struct extent_buffer *leaf;
4187         int type;
4188         u32 size;
4189
4190         if (parent > 0)
4191                 type = BTRFS_SHARED_DATA_REF_KEY;
4192         else
4193                 type = BTRFS_EXTENT_DATA_REF_KEY;
4194
4195         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4196
4197         path = btrfs_alloc_path();
4198         BUG_ON(!path);
4199
4200         path->leave_spinning = 1;
4201         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4202                                       ins, size);
4203         BUG_ON(ret);
4204
4205         leaf = path->nodes[0];
4206         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4207                                      struct btrfs_extent_item);
4208         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4209         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4210         btrfs_set_extent_flags(leaf, extent_item,
4211                                flags | BTRFS_EXTENT_FLAG_DATA);
4212
4213         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4214         btrfs_set_extent_inline_ref_type(leaf, iref, type);
4215         if (parent > 0) {
4216                 struct btrfs_shared_data_ref *ref;
4217                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4218                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4219                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4220         } else {
4221                 struct btrfs_extent_data_ref *ref;
4222                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4223                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4224                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4225                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4226                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4227         }
4228
4229         btrfs_mark_buffer_dirty(path->nodes[0]);
4230         btrfs_free_path(path);
4231
4232         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4233                                  1, 0);
4234         if (ret) {
4235                 printk(KERN_ERR "btrfs update block group failed for %llu "
4236                        "%llu\n", (unsigned long long)ins->objectid,
4237                        (unsigned long long)ins->offset);
4238                 BUG();
4239         }
4240         return ret;
4241 }
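/*
 * Illustrative note, not part of the original file: the item inserted
 * above is one contiguous blob, which is why its size is computed as
 * sizeof(*extent_item) + btrfs_extent_inline_ref_size(type):
 *
 *   shared:     [btrfs_extent_item][inline ref][btrfs_shared_data_ref]
 *   non-shared: [btrfs_extent_item][inline ref|btrfs_extent_data_ref]
 *
 * in the non-shared case the data ref starts at &iref->offset, so the
 * inline ref and the data ref deliberately overlap.
 */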
4242
4243 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4244                                      struct btrfs_root *root,
4245                                      u64 parent, u64 root_objectid,
4246                                      u64 flags, struct btrfs_disk_key *key,
4247                                      int level, struct btrfs_key *ins)
4248 {
4249         int ret;
4250         struct btrfs_fs_info *fs_info = root->fs_info;
4251         struct btrfs_extent_item *extent_item;
4252         struct btrfs_tree_block_info *block_info;
4253         struct btrfs_extent_inline_ref *iref;
4254         struct btrfs_path *path;
4255         struct extent_buffer *leaf;
4256         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4257
4258         path = btrfs_alloc_path();
4259         BUG_ON(!path);
4260
4261         path->leave_spinning = 1;
4262         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4263                                       ins, size);
4264         BUG_ON(ret);
4265
4266         leaf = path->nodes[0];
4267         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4268                                      struct btrfs_extent_item);
4269         btrfs_set_extent_refs(leaf, extent_item, 1);
4270         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4271         btrfs_set_extent_flags(leaf, extent_item,
4272                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4273         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4274
4275         btrfs_set_tree_block_key(leaf, block_info, key);
4276         btrfs_set_tree_block_level(leaf, block_info, level);
4277
4278         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4279         if (parent > 0) {
4280                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4281                 btrfs_set_extent_inline_ref_type(leaf, iref,
4282                                                  BTRFS_SHARED_BLOCK_REF_KEY);
4283                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4284         } else {
4285                 btrfs_set_extent_inline_ref_type(leaf, iref,
4286                                                  BTRFS_TREE_BLOCK_REF_KEY);
4287                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4288         }
4289
4290         btrfs_mark_buffer_dirty(leaf);
4291         btrfs_free_path(path);
4292
4293         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4294                                  1, 0);
4295         if (ret) {
4296                 printk(KERN_ERR "btrfs update block group failed for %llu "
4297                        "%llu\n", (unsigned long long)ins->objectid,
4298                        (unsigned long long)ins->offset);
4299                 BUG();
4300         }
4301         return ret;
4302 }
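/*
 * Illustrative note, not part of the original file: the tree block
 * variant adds a btrfs_tree_block_info between the extent item and the
 * inline ref, matching the size computation above:
 *
 *   [btrfs_extent_item][btrfs_tree_block_info][btrfs_extent_inline_ref]
 *
 * the inline ref then carries either the parent bytenr (shared backref)
 * or the owning root objectid (normal backref).
 */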
4303
4304 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4305                                      struct btrfs_root *root,
4306                                      u64 root_objectid, u64 owner,
4307                                      u64 offset, struct btrfs_key *ins)
4308 {
4309         int ret;
4310
4311         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4312
4313         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4314                                          0, root_objectid, owner, offset,
4315                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
4316         return ret;
4317 }
4318
4319 /*
4320  * this is used by the tree logging recovery code.  It records that
4321  * an extent has been allocated and makes sure to clear the free
4322  * space cache bits as well
4323  */
4324 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4325                                    struct btrfs_root *root,
4326                                    u64 root_objectid, u64 owner, u64 offset,
4327                                    struct btrfs_key *ins)
4328 {
4329         int ret;
4330         struct btrfs_block_group_cache *block_group;
4331         struct btrfs_caching_control *caching_ctl;
4332         u64 start = ins->objectid;
4333         u64 num_bytes = ins->offset;
4334
4335         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4336         cache_block_group(block_group);
4337         caching_ctl = get_caching_control(block_group);
4338
4339         if (!caching_ctl) {
4340                 BUG_ON(!block_group_cache_done(block_group));
4341                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4342                 BUG_ON(ret);
4343         } else {
4344                 mutex_lock(&caching_ctl->mutex);
4345
4346                 if (start >= caching_ctl->progress) {
4347                         ret = add_excluded_extent(root, start, num_bytes);
4348                         BUG_ON(ret);
4349                 } else if (start + num_bytes <= caching_ctl->progress) {
4350                         ret = btrfs_remove_free_space(block_group,
4351                                                       start, num_bytes);
4352                         BUG_ON(ret);
4353                 } else {
4354                         num_bytes = caching_ctl->progress - start;
4355                         ret = btrfs_remove_free_space(block_group,
4356                                                       start, num_bytes);
4357                         BUG_ON(ret);
4358
4359                         start = caching_ctl->progress;
4360                         num_bytes = ins->objectid + ins->offset -
4361                                     caching_ctl->progress;
4362                         ret = add_excluded_extent(root, start, num_bytes);
4363                         BUG_ON(ret);
4364                 }
4365
4366                 mutex_unlock(&caching_ctl->mutex);
4367                 put_caching_control(caching_ctl);
4368         }
4369
4370         update_reserved_extents(block_group, ins->offset, 1);
4371         btrfs_put_block_group(block_group);
4372         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4373                                          0, owner, offset, ins, 1);
4374         return ret;
4375 }
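/*
 * Illustrative note, not part of the original file: relative to the
 * caching progress pointer, the logged extent [start, start + num_bytes)
 * is handled above in three cases:
 *
 *   progress <= start              whole range still uncached: exclude it
 *   start + num_bytes <= progress  whole range already cached: remove it
 *                                  from the free space cache
 *   otherwise                      remove the cached head from free space
 *                                  and exclude the uncached tail
 */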
4376
4377 /*
4378  * finds a free extent and does all the dirty work required for allocation
4379  * returns the key for the extent through ins; the tree buffer for
4380  * the first block of the extent is set up by btrfs_init_new_buffer.
4381  *
4382  * returns 0 if everything worked, non-zero otherwise.
4383  */
4384 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4385                             struct btrfs_root *root,
4386                             u64 num_bytes, u64 parent, u64 root_objectid,
4387                             struct btrfs_disk_key *key, int level,
4388                             u64 empty_size, u64 hint_byte, u64 search_end,
4389                             struct btrfs_key *ins)
4390 {
4391         int ret;
4392         u64 flags = 0;
4393
4394         ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4395                                    empty_size, hint_byte, search_end,
4396                                    ins, 0);
4397         if (ret)
4398                 return ret;
4399
4400         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4401                 if (parent == 0)
4402                         parent = ins->objectid;
4403                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4404         } else
4405                 BUG_ON(parent > 0);
4406
4407         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4408                 struct btrfs_delayed_extent_op *extent_op;
4409                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4410                 BUG_ON(!extent_op);
4411                 if (key)
4412                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
4413                 else
4414                         memset(&extent_op->key, 0, sizeof(extent_op->key));
4415                 extent_op->flags_to_set = flags;
4416                 extent_op->update_key = 1;
4417                 extent_op->update_flags = 1;
4418                 extent_op->is_data = 0;
4419
4420                 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4421                                         ins->offset, parent, root_objectid,
4422                                         level, BTRFS_ADD_DELAYED_EXTENT,
4423                                         extent_op);
4424                 BUG_ON(ret);
4425         }
4426         return ret;
4427 }
4428
4429 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4430                                             struct btrfs_root *root,
4431                                             u64 bytenr, u32 blocksize,
4432                                             int level)
4433 {
4434         struct extent_buffer *buf;
4435
4436         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4437         if (!buf)
4438                 return ERR_PTR(-ENOMEM);
4439         btrfs_set_header_generation(buf, trans->transid);
4440         btrfs_set_buffer_lockdep_class(buf, level);
4441         btrfs_tree_lock(buf);
4442         clean_tree_block(trans, root, buf);
4443
4444         btrfs_set_lock_blocking(buf);
4445         btrfs_set_buffer_uptodate(buf);
4446
4447         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4448                 set_extent_dirty(&root->dirty_log_pages, buf->start,
4449                          buf->start + buf->len - 1, GFP_NOFS);
4450         } else {
4451                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4452                          buf->start + buf->len - 1, GFP_NOFS);
4453         }
4454         trans->blocks_used++;
4455         /* this returns a buffer locked for blocking */
4456         return buf;
4457 }
4458
4459 /*
4460  * helper function to allocate a block for a given tree
4461  * returns the tree buffer or NULL.
4462  */
4463 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4464                                         struct btrfs_root *root, u32 blocksize,
4465                                         u64 parent, u64 root_objectid,
4466                                         struct btrfs_disk_key *key, int level,
4467                                         u64 hint, u64 empty_size)
4468 {
4469         struct btrfs_key ins;
4470         int ret;
4471         struct extent_buffer *buf;
4472
4473         ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4474                                key, level, empty_size, hint, (u64)-1, &ins);
4475         if (ret) {
4476                 BUG_ON(ret > 0);
4477                 return ERR_PTR(ret);
4478         }
4479
4480         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4481                                     blocksize, level);
4482         return buf;
4483 }
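/*
 * Hypothetical caller, for illustration only (the surrounding names are
 * assumptions, not taken from this file): allocate a node-sized block
 * for the current root and propagate failure.
 */
#if 0
buf = btrfs_alloc_free_block(trans, root, root->nodesize,
			     0 /* parent, only non-zero for reloc trees */,
			     root->root_key.objectid, &disk_key, level,
			     hint_byte, 0 /* empty_size */);
if (IS_ERR(buf))
	return PTR_ERR(buf);
#endif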
4484
4485 struct walk_control {
4486         u64 refs[BTRFS_MAX_LEVEL];
4487         u64 flags[BTRFS_MAX_LEVEL];
4488         struct btrfs_key update_progress;
4489         int stage;
4490         int level;
4491         int shared_level;
4492         int update_ref;
4493         int keep_locks;
4494         int reada_slot;
4495         int reada_count;
4496 };
4497
4498 #define DROP_REFERENCE  1
4499 #define UPDATE_BACKREF  2
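/*
 * Illustrative note, not part of the original file: the two stages form
 * a small state machine over the walk (see do_walk_down() and
 * walk_up_proc() below):
 *
 *   DROP_REFERENCE --(shared child found, wc->update_ref set)--> UPDATE_BACKREF
 *   UPDATE_BACKREF --(walked back up to wc->shared_level)------> DROP_REFERENCE
 *
 * DROP_REFERENCE drops references, freeing blocks once the walker holds
 * the last reference; UPDATE_BACKREF only rewrites back references for
 * the shared subtree.
 */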
4500
4501 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4502                                      struct btrfs_root *root,
4503                                      struct walk_control *wc,
4504                                      struct btrfs_path *path)
4505 {
4506         u64 bytenr;
4507         u64 generation;
4508         u64 refs;
4509         u64 last = 0;
4510         u32 nritems;
4511         u32 blocksize;
4512         struct btrfs_key key;
4513         struct extent_buffer *eb;
4514         int ret;
4515         int slot;
4516         int nread = 0;
4517
4518         if (path->slots[wc->level] < wc->reada_slot) {
4519                 wc->reada_count = wc->reada_count * 2 / 3;
4520                 wc->reada_count = max(wc->reada_count, 2);
4521         } else {
4522                 wc->reada_count = wc->reada_count * 3 / 2;
4523                 wc->reada_count = min_t(int, wc->reada_count,
4524                                         BTRFS_NODEPTRS_PER_BLOCK(root));
4525         }
4526
4527         eb = path->nodes[wc->level];
4528         nritems = btrfs_header_nritems(eb);
4529         blocksize = btrfs_level_size(root, wc->level - 1);
4530
4531         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
4532                 if (nread >= wc->reada_count)
4533                         break;
4534
4535                 cond_resched();
4536                 bytenr = btrfs_node_blockptr(eb, slot);
4537                 generation = btrfs_node_ptr_generation(eb, slot);
4538
4539                 if (slot == path->slots[wc->level])
4540                         goto reada;
4541
4542                 if (wc->stage == UPDATE_BACKREF &&
4543                     generation <= root->root_key.offset)
4544                         continue;
4545
4546                 if (wc->stage == DROP_REFERENCE) {
4547                         ret = btrfs_lookup_extent_info(trans, root,
4548                                                 bytenr, blocksize,
4549                                                 &refs, NULL);
4550                         BUG_ON(ret);
4551                         BUG_ON(refs == 0);
4552                         if (refs == 1)
4553                                 goto reada;
4554
4555                         if (!wc->update_ref ||
4556                             generation <= root->root_key.offset)
4557                                 continue;
4558                         btrfs_node_key_to_cpu(eb, &key, slot);
4559                         ret = btrfs_comp_cpu_keys(&key,
4560                                                   &wc->update_progress);
4561                         if (ret < 0)
4562                                 continue;
4563                 }
4564 reada:
4565                 ret = readahead_tree_block(root, bytenr, blocksize,
4566                                            generation);
4567                 if (ret)
4568                         break;
4569                 last = bytenr + blocksize;
4570                 nread++;
4571         }
4572         wc->reada_slot = slot;
4573 }
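/*
 * Illustrative sketch, not part of the original file: the window logic
 * at the top of reada_walk_down() in isolation.  "behind" means the
 * walker has not yet reached the last slot that was read ahead, i.e.
 * readahead is running too far in front:
 */
#if 0
static int sketch_adapt_window(int count, int behind, int max_ptrs)
{
	if (behind)
		return max(count * 2 / 3, 2);	/* shrink, keep at least 2 */
	return min(count * 3 / 2, max_ptrs);	/* grow, cap at node size */
}
#endif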
4574
4575 /*
4576  * helper to process a tree block while walking down the tree.
4577  *
4578  * when wc->stage == UPDATE_BACKREF, this function updates
4579  * back refs for pointers in the block.
4580  *
4581  * NOTE: return value 1 means we should stop walking down.
4582  */
4583 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4584                                    struct btrfs_root *root,
4585                                    struct btrfs_path *path,
4586                                    struct walk_control *wc)
4587 {
4588         int level = wc->level;
4589         struct extent_buffer *eb = path->nodes[level];
4590         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4591         int ret;
4592
4593         if (wc->stage == UPDATE_BACKREF &&
4594             btrfs_header_owner(eb) != root->root_key.objectid)
4595                 return 1;
4596
4597         /*
4598          * when the reference count of a tree block is 1, it won't increase
4599          * again. once the full backref flag is set, we never clear it.
4600          */
4601         if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4602             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4603                 BUG_ON(!path->locks[level]);
4604                 ret = btrfs_lookup_extent_info(trans, root,
4605                                                eb->start, eb->len,
4606                                                &wc->refs[level],
4607                                                &wc->flags[level]);
4608                 BUG_ON(ret);
4609                 BUG_ON(wc->refs[level] == 0);
4610         }
4611
4612         if (wc->stage == DROP_REFERENCE) {
4613                 if (wc->refs[level] > 1)
4614                         return 1;
4615
4616                 if (path->locks[level] && !wc->keep_locks) {
4617                         btrfs_tree_unlock(eb);
4618                         path->locks[level] = 0;
4619                 }
4620                 return 0;
4621         }
4622
4623         /* wc->stage == UPDATE_BACKREF */
4624         if (!(wc->flags[level] & flag)) {
4625                 BUG_ON(!path->locks[level]);
4626                 ret = btrfs_inc_ref(trans, root, eb, 1);
4627                 BUG_ON(ret);
4628                 ret = btrfs_dec_ref(trans, root, eb, 0);
4629                 BUG_ON(ret);
4630                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4631                                                   eb->len, flag, 0);
4632                 BUG_ON(ret);
4633                 wc->flags[level] |= flag;
4634         }
4635
4636         /*
4637          * the block is shared by multiple trees, so it's not good to
4638          * keep the tree lock
4639          */
4640         if (path->locks[level] && level > 0) {
4641                 btrfs_tree_unlock(eb);
4642                 path->locks[level] = 0;
4643         }
4644         return 0;
4645 }
4646
4647 /*
4648  * helper to process a tree block pointer.
4649  *
4650  * when wc->stage == DROP_REFERENCE, this function checks the
4651  * reference count of the block pointed to. if the block is
4652  * shared and we need to update back refs for the subtree
4653  * rooted at the block, this function changes wc->stage to
4654  * UPDATE_BACKREF. if the block is shared and there is no
4655  * need to update back refs, this function drops the reference
4656  * to the block.
4657  *
4658  * NOTE: return value 1 means we should stop walking down.
4659  */
4660 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4661                                  struct btrfs_root *root,
4662                                  struct btrfs_path *path,
4663                                  struct walk_control *wc)
4664 {
4665         u64 bytenr;
4666         u64 generation;
4667         u64 parent;
4668         u32 blocksize;
4669         struct btrfs_key key;
4670         struct extent_buffer *next;
4671         int level = wc->level;
4672         int reada = 0;
4673         int ret = 0;
4674
4675         generation = btrfs_node_ptr_generation(path->nodes[level],
4676                                                path->slots[level]);
4677         /*
4678          * if the lower level block was created before the snapshot
4679          * was created, we know there is no need to update back refs
4680          * for the subtree
4681          */
4682         if (wc->stage == UPDATE_BACKREF &&
4683             generation <= root->root_key.offset)
4684                 return 1;
4685
4686         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
4687         blocksize = btrfs_level_size(root, level - 1);
4688
4689         next = btrfs_find_tree_block(root, bytenr, blocksize);
4690         if (!next) {
4691                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
4692                 reada = 1;
4693         }
4694         btrfs_tree_lock(next);
4695         btrfs_set_lock_blocking(next);
4696
4697         if (wc->stage == DROP_REFERENCE) {
4698                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
4699                                                &wc->refs[level - 1],
4700                                                &wc->flags[level - 1]);
4701                 BUG_ON(ret);
4702                 BUG_ON(wc->refs[level - 1] == 0);
4703
4704                 if (wc->refs[level - 1] > 1) {
4705                         if (!wc->update_ref ||
4706                             generation <= root->root_key.offset)
4707                                 goto skip;
4708
4709                         btrfs_node_key_to_cpu(path->nodes[level], &key,
4710                                               path->slots[level]);
4711                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
4712                         if (ret < 0)
4713                                 goto skip;
4714
4715                         wc->stage = UPDATE_BACKREF;
4716                         wc->shared_level = level - 1;
4717                 }
4718         }
4719
4720         if (!btrfs_buffer_uptodate(next, generation)) {
4721                 btrfs_tree_unlock(next);
4722                 free_extent_buffer(next);
4723                 next = NULL;
4724         }
4725
4726         if (!next) {
4727                 if (reada && level == 1)
4728                         reada_walk_down(trans, root, wc, path);
4729                 next = read_tree_block(root, bytenr, blocksize, generation);
4730                 btrfs_tree_lock(next);
4731                 btrfs_set_lock_blocking(next);
4732         }
4733
4734         level--;
4735         BUG_ON(level != btrfs_header_level(next));
4736         path->nodes[level] = next;
4737         path->slots[level] = 0;
4738         path->locks[level] = 1;
4739         wc->level = level;
4740         if (wc->level == 1)
4741                 wc->reada_slot = 0;
4742         return 0;
4743 skip:
4744         wc->refs[level - 1] = 0;
4745         wc->flags[level - 1] = 0;
4746
4747         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
4748                 parent = path->nodes[level]->start;
4749         } else {
4750                 BUG_ON(root->root_key.objectid !=
4751                        btrfs_header_owner(path->nodes[level]));
4752                 parent = 0;
4753         }
4754
4755         ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
4756                                 root->root_key.objectid, level - 1, 0);
4757         BUG_ON(ret);
4758
4759         btrfs_tree_unlock(next);
4760         free_extent_buffer(next);
4761         return 1;
4762 }
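/*
 * Illustrative note, not part of the original file: the skip path above
 * is the cheap exit for shared subtrees that need no backref rewrite;
 * instead of descending, the walker drops one reference on the child
 * via btrfs_free_extent() and returns 1 so the caller advances to the
 * next slot, pruning the whole subtree from this walk.
 */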
4763
4764 /*
4765  * helper to process a tree block while walking up the tree.
4766  *
4767  * when wc->stage == DROP_REFERENCE, this function drops
4768  * reference count on the block.
4769  *
4770  * when wc->stage == UPDATE_BACKREF, this function changes
4771  * wc->stage back to DROP_REFERENCE if we changed wc->stage
4772  * to UPDATE_BACKREF previously while processing the block.
4773  *
4774  * NOTE: return value 1 means we should stop walking up.
4775  */
4776 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4777                                  struct btrfs_root *root,
4778                                  struct btrfs_path *path,
4779                                  struct walk_control *wc)
4780 {
4781         int ret = 0;
4782         int level = wc->level;
4783         struct extent_buffer *eb = path->nodes[level];
4784         u64 parent = 0;
4785
4786         if (wc->stage == UPDATE_BACKREF) {
4787                 BUG_ON(wc->shared_level < level);
4788                 if (level < wc->shared_level)
4789                         goto out;
4790
4791                 ret = find_next_key(path, level + 1, &wc->update_progress);
4792                 if (ret > 0)
4793                         wc->update_ref = 0;
4794
4795                 wc->stage = DROP_REFERENCE;
4796                 wc->shared_level = -1;
4797                 path->slots[level] = 0;
4798
4799                 /*
4800                  * check the reference count again if the block isn't locked.
4801                  * we should start walking down the tree again if the
4802                  * reference count is one.
4803                  */
4804                 if (!path->locks[level]) {
4805                         BUG_ON(level == 0);
4806                         btrfs_tree_lock(eb);
4807                         btrfs_set_lock_blocking(eb);
4808                         path->locks[level] = 1;
4809
4810                         ret = btrfs_lookup_extent_info(trans, root,
4811                                                        eb->start, eb->len,
4812                                                        &wc->refs[level],
4813                                                        &wc->flags[level]);
4814                         BUG_ON(ret);
4815                         BUG_ON(wc->refs[level] == 0);
4816                         if (wc->refs[level] == 1) {
4817                                 btrfs_tree_unlock(eb);
4818                                 path->locks[level] = 0;
4819                                 return 1;
4820                         }
4821                 }
4822         }
4823
4824         /* wc->stage == DROP_REFERENCE */
4825         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4826
4827         if (wc->refs[level] == 1) {
4828                 if (level == 0) {
4829                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4830                                 ret = btrfs_dec_ref(trans, root, eb, 1);
4831                         else
4832                                 ret = btrfs_dec_ref(trans, root, eb, 0);
4833                         BUG_ON(ret);
4834                 }
4835                 /* make block locked assertion in clean_tree_block happy */
4836                 if (!path->locks[level] &&
4837                     btrfs_header_generation(eb) == trans->transid) {
4838                         btrfs_tree_lock(eb);
4839                         btrfs_set_lock_blocking(eb);
4840                         path->locks[level] = 1;
4841                 }
4842                 clean_tree_block(trans, root, eb);
4843         }
4844
4845         if (eb == root->node) {
4846                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4847                         parent = eb->start;
4848                 else
4849                         BUG_ON(root->root_key.objectid !=
4850                                btrfs_header_owner(eb));
4851         } else {
4852                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4853                         parent = path->nodes[level + 1]->start;
4854                 else
4855                         BUG_ON(root->root_key.objectid !=
4856                                btrfs_header_owner(path->nodes[level + 1]));
4857         }
4858
4859         ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4860                                 root->root_key.objectid, level, 0);
4861         BUG_ON(ret);
4862 out:
4863         wc->refs[level] = 0;
4864         wc->flags[level] = 0;
4865         return ret;
4866 }
4867
4868 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4869                                    struct btrfs_root *root,
4870                                    struct btrfs_path *path,
4871                                    struct walk_control *wc)
4872 {
4873         int level = wc->level;
4874         int ret;
4875
4876         while (level >= 0) {
4877                 if (path->slots[level] >=
4878                     btrfs_header_nritems(path->nodes[level]))
4879                         break;
4880
4881                 ret = walk_down_proc(trans, root, path, wc);
4882                 if (ret > 0)
4883                         break;
4884
4885                 if (level == 0)
4886                         break;
4887
4888                 ret = do_walk_down(trans, root, path, wc);
4889                 if (ret > 0) {
4890                         path->slots[level]++;
4891                         continue;
4892                 }
4893                 level = wc->level;
4894         }
4895         return 0;
4896 }
4897
4898 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4899                                  struct btrfs_root *root,
4900                                  struct btrfs_path *path,
4901                                  struct walk_control *wc, int max_level)
4902 {
4903         int level = wc->level;
4904         int ret;
4905
4906         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
4907         while (level < max_level && path->nodes[level]) {
4908                 wc->level = level;
4909                 if (path->slots[level] + 1 <
4910                     btrfs_header_nritems(path->nodes[level])) {
4911                         path->slots[level]++;
4912                         return 0;
4913                 } else {
4914                         ret = walk_up_proc(trans, root, path, wc);
4915                         if (ret > 0)
4916                                 return 0;
4917
4918                         if (path->locks[level]) {
4919                                 btrfs_tree_unlock(path->nodes[level]);
4920                                 path->locks[level] = 0;
4921                         }
4922                         free_extent_buffer(path->nodes[level]);
4923                         path->nodes[level] = NULL;
4924                         level++;
4925                 }
4926         }
4927         return 1;
4928 }
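/*
 * Illustrative note, not part of the original file: walk_down_tree()
 * and walk_up_tree() are the two halves of an iterative depth-first
 * traversal.  Callers alternate them in a loop, as btrfs_drop_snapshot()
 * does below: descend as far as walk_down_proc()/do_walk_down() allow,
 * then ascend, freeing blocks, until walk_up_tree() either steps to an
 * unvisited sibling (returns 0) or runs out of levels (returns 1).
 */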
4929
4930 /*
4931  * drop a subvolume tree.
4932  *
4933  * this function traverses the tree freeing any blocks that are only
4934  * referenced by the tree.
4935  *
4936  * when a shared tree block is found, this function decreases its
4937  * reference count by one. if update_ref is true, this function
4938  * also makes sure backrefs for the shared block and all lower level
4939  * blocks are properly updated.
4940  */
4941 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
4942 {
4943         struct btrfs_path *path;
4944         struct btrfs_trans_handle *trans;
4945         struct btrfs_root *tree_root = root->fs_info->tree_root;
4946         struct btrfs_root_item *root_item = &root->root_item;
4947         struct walk_control *wc;
4948         struct btrfs_key key;
4949         int err = 0;
4950         int ret;
4951         int level;
4952
4953         path = btrfs_alloc_path();
4954         BUG_ON(!path);
4955
4956         wc = kzalloc(sizeof(*wc), GFP_NOFS);
4957         BUG_ON(!wc);
4958
4959         trans = btrfs_start_transaction(tree_root, 1);
4960
4961         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
4962                 level = btrfs_header_level(root->node);
4963                 path->nodes[level] = btrfs_lock_root_node(root);
4964                 btrfs_set_lock_blocking(path->nodes[level]);
4965                 path->slots[level] = 0;
4966                 path->locks[level] = 1;
4967                 memset(&wc->update_progress, 0,
4968                        sizeof(wc->update_progress));
4969         } else {
4970                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
4971                 memcpy(&wc->update_progress, &key,
4972                        sizeof(wc->update_progress));
4973
4974                 level = root_item->drop_level;
4975                 BUG_ON(level == 0);
4976                 path->lowest_level = level;
4977                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4978                 path->lowest_level = 0;
4979                 if (ret < 0) {
4980                         err = ret;
4981                         goto out;
4982                 }
4983                 WARN_ON(ret > 0);
4984
4985                 /*
4986                  * unlock our path; this is safe because only this
4987                  * function is allowed to delete this snapshot
4988                  */
4989                 btrfs_unlock_up_safe(path, 0);
4990
4991                 level = btrfs_header_level(root->node);
4992                 while (1) {
4993                         btrfs_tree_lock(path->nodes[level]);
4994                         btrfs_set_lock_blocking(path->nodes[level]);
4995
4996                         ret = btrfs_lookup_extent_info(trans, root,
4997                                                 path->nodes[level]->start,
4998                                                 path->nodes[level]->len,
4999                                                 &wc->refs[level],
5000                                                 &wc->flags[level]);
5001                         BUG_ON(ret);
5002                         BUG_ON(wc->refs[level] == 0);
5003
5004                         if (level == root_item->drop_level)
5005                                 break;
5006
5007                         btrfs_tree_unlock(path->nodes[level]);
5008                         WARN_ON(wc->refs[level] != 1);
5009                         level--;
5010                 }
5011         }
5012
5013         wc->level = level;
5014         wc->shared_level = -1;
5015         wc->stage = DROP_REFERENCE;
5016         wc->update_ref = update_ref;
5017         wc->keep_locks = 0;
5018         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5019
5020         while (1) {
5021                 ret = walk_down_tree(trans, root, path, wc);
5022                 if (ret < 0) {
5023                         err = ret;
5024                         break;
5025                 }
5026
5027                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5028                 if (ret < 0) {
5029                         err = ret;
5030                         break;
5031                 }
5032
5033                 if (ret > 0) {
5034                         BUG_ON(wc->stage != DROP_REFERENCE);
5035                         break;
5036                 }
5037
5038                 if (wc->stage == DROP_REFERENCE) {
5039                         level = wc->level;
5040                         btrfs_node_key(path->nodes[level],
5041                                        &root_item->drop_progress,
5042                                        path->slots[level]);
5043                         root_item->drop_level = level;
5044                 }
5045
5046                 BUG_ON(wc->level == 0);
5047                 if (trans->transaction->in_commit ||
5048                     trans->transaction->delayed_refs.flushing) {
5049                         ret = btrfs_update_root(trans, tree_root,
5050                                                 &root->root_key,
5051                                                 root_item);
5052                         BUG_ON(ret);
5053
5054                         btrfs_end_transaction(trans, tree_root);
5055                         trans = btrfs_start_transaction(tree_root, 1);
5056                 } else {
5057                         unsigned long update;
5058                         update = trans->delayed_ref_updates;
5059                         trans->delayed_ref_updates = 0;
5060                         if (update)
5061                                 btrfs_run_delayed_refs(trans, tree_root,
5062                                                        update);
5063                 }
5064         }
5065         btrfs_release_path(root, path);
5066         BUG_ON(err);
5067
5068         ret = btrfs_del_root(trans, tree_root, &root->root_key);
5069         BUG_ON(ret);
5070
5071         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5072                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5073                                            NULL, NULL);
5074                 BUG_ON(ret < 0);
5075                 if (ret > 0) {
5076                         ret = btrfs_del_orphan_item(trans, tree_root,
5077                                                     root->root_key.objectid);
5078                         BUG_ON(ret);
5079                 }
5080         }
5081
5082         if (root->in_radix) {
5083                 btrfs_free_fs_root(tree_root->fs_info, root);
5084         } else {
5085                 free_extent_buffer(root->node);
5086                 free_extent_buffer(root->commit_root);
5087                 kfree(root);
5088         }
5089 out:
5090         btrfs_end_transaction(trans, tree_root);
5091         kfree(wc);
5092         btrfs_free_path(path);
5093         return err;
5094 }
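/*
 * Illustrative note, not part of the original file: drop_progress is
 * what makes the deletion restartable.  Whenever the transaction has to
 * be broken (a commit is in progress or delayed refs are flushing), the
 * current node key and level are saved in the root item; on a later
 * retry the "else" branch at the top of this function re-searches down
 * to drop_level and resumes the walk from that key.
 */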
5095
5096 /*
5097  * drop subtree rooted at tree block 'node'.
5098  *
5099  * NOTE: this function will unlock and release tree block 'node'
5100  */
5101 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5102                         struct btrfs_root *root,
5103                         struct extent_buffer *node,
5104                         struct extent_buffer *parent)
5105 {
5106         struct btrfs_path *path;
5107         struct walk_control *wc;
5108         int level;
5109         int parent_level;
5110         int ret = 0;
5111         int wret;
5112
5113         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5114
5115         path = btrfs_alloc_path();
5116         BUG_ON(!path);
5117
5118         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5119         BUG_ON(!wc);
5120
5121         btrfs_assert_tree_locked(parent);
5122         parent_level = btrfs_header_level(parent);
5123         extent_buffer_get(parent);
5124         path->nodes[parent_level] = parent;
5125         path->slots[parent_level] = btrfs_header_nritems(parent);
5126
5127         btrfs_assert_tree_locked(node);
5128         level = btrfs_header_level(node);
5129         path->nodes[level] = node;
5130         path->slots[level] = 0;
5131         path->locks[level] = 1;
5132
5133         wc->refs[parent_level] = 1;
5134         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5135         wc->level = level;
5136         wc->shared_level = -1;
5137         wc->stage = DROP_REFERENCE;
5138         wc->update_ref = 0;
5139         wc->keep_locks = 1;
5140         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5141
5142         while (1) {
5143                 wret = walk_down_tree(trans, root, path, wc);
5144                 if (wret < 0) {
5145                         ret = wret;
5146                         break;
5147                 }
5148
5149                 wret = walk_up_tree(trans, root, path, wc, parent_level);
5150                 if (wret < 0)
5151                         ret = wret;
5152                 if (wret != 0)
5153                         break;
5154         }
5155
5156         kfree(wc);
5157         btrfs_free_path(path);
5158         return ret;
5159 }
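/*
 * Illustrative note, not part of the original file: compared with
 * btrfs_drop_snapshot(), this variant only runs on reloc trees (note
 * the BUG_ON above), never switches to UPDATE_BACKREF (wc->update_ref
 * is 0) and keeps locks while descending (wc->keep_locks is 1), since
 * the caller still holds 'parent' locked.
 */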
5160
5161 #if 0
5162 static unsigned long calc_ra(unsigned long start, unsigned long last,
5163                              unsigned long nr)
5164 {
5165         return min(last, start + nr - 1);
5166 }
5167
5168 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5169                                          u64 len)
5170 {
5171         u64 page_start;
5172         u64 page_end;
5173         unsigned long first_index;
5174         unsigned long last_index;
5175         unsigned long i;
5176         struct page *page;
5177         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5178         struct file_ra_state *ra;
5179         struct btrfs_ordered_extent *ordered;
5180         unsigned int total_read = 0;
5181         unsigned int total_dirty = 0;
5182         int ret = 0;
5183
5184         ra = kzalloc(sizeof(*ra), GFP_NOFS);
5185
5186         mutex_lock(&inode->i_mutex);
5187         first_index = start >> PAGE_CACHE_SHIFT;
5188         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5189
5190         /* make sure the dirty trick played by the caller works */
5191         ret = invalidate_inode_pages2_range(inode->i_mapping,
5192                                             first_index, last_index);
5193         if (ret)
5194                 goto out_unlock;
5195
5196         file_ra_state_init(ra, inode->i_mapping);
5197
5198         for (i = first_index ; i <= last_index; i++) {
5199                 if (total_read % ra->ra_pages == 0) {
5200                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5201                                        calc_ra(i, last_index, ra->ra_pages));
5202                 }
5203                 total_read++;
5204 again:
5205                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
5206                         BUG_ON(1);
5207                 page = grab_cache_page(inode->i_mapping, i);
5208                 if (!page) {
5209                         ret = -ENOMEM;
5210                         goto out_unlock;
5211                 }
5212                 if (!PageUptodate(page)) {
5213                         btrfs_readpage(NULL, page);
5214                         lock_page(page);
5215                         if (!PageUptodate(page)) {
5216                                 unlock_page(page);
5217                                 page_cache_release(page);
5218                                 ret = -EIO;
5219                                 goto out_unlock;
5220                         }
5221                 }
5222                 wait_on_page_writeback(page);
5223
5224                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5225                 page_end = page_start + PAGE_CACHE_SIZE - 1;
5226                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5227
5228                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5229                 if (ordered) {
5230                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5231                         unlock_page(page);
5232                         page_cache_release(page);
5233                         btrfs_start_ordered_extent(inode, ordered, 1);
5234                         btrfs_put_ordered_extent(ordered);
5235                         goto again;
5236                 }
5237                 set_page_extent_mapped(page);
5238
5239                 if (i == first_index)
5240                         set_extent_bits(io_tree, page_start, page_end,
5241                                         EXTENT_BOUNDARY, GFP_NOFS);
5242                 btrfs_set_extent_delalloc(inode, page_start, page_end);
5243
5244                 set_page_dirty(page);
5245                 total_dirty++;
5246
5247                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5248                 unlock_page(page);
5249                 page_cache_release(page);
5250         }
5251
5252 out_unlock:
5253         kfree(ra);
5254         mutex_unlock(&inode->i_mutex);
5255         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5256         return ret;
5257 }
5258
5259 static noinline int relocate_data_extent(struct inode *reloc_inode,
5260                                          struct btrfs_key *extent_key,
5261                                          u64 offset)
5262 {
5263         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5264         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5265         struct extent_map *em;
5266         u64 start = extent_key->objectid - offset;
5267         u64 end = start + extent_key->offset - 1;
5268
5269         em = alloc_extent_map(GFP_NOFS);
5270         BUG_ON(!em || IS_ERR(em));
5271
5272         em->start = start;
5273         em->len = extent_key->offset;
5274         em->block_len = extent_key->offset;
5275         em->block_start = extent_key->objectid;
5276         em->bdev = root->fs_info->fs_devices->latest_bdev;
5277         set_bit(EXTENT_FLAG_PINNED, &em->flags);
5278
5279         /* set up extent map to cheat btrfs_readpage */
5280         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5281         while (1) {
5282                 int ret;
5283                 write_lock(&em_tree->lock);
5284                 ret = add_extent_mapping(em_tree, em);
5285                 write_unlock(&em_tree->lock);
5286                 if (ret != -EEXIST) {
5287                         free_extent_map(em);
5288                         break;
5289                 }
5290                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5291         }
5292         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5293
5294         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5295 }
5296
5297 struct btrfs_ref_path {
5298         u64 extent_start;
5299         u64 nodes[BTRFS_MAX_LEVEL];
5300         u64 root_objectid;
5301         u64 root_generation;
5302         u64 owner_objectid;
5303         u32 num_refs;
5304         int lowest_level;
5305         int current_level;
5306         int shared_level;
5307
5308         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5309         u64 new_nodes[BTRFS_MAX_LEVEL];
5310 };
5311
5312 struct disk_extent {
5313         u64 ram_bytes;
5314         u64 disk_bytenr;
5315         u64 disk_num_bytes;
5316         u64 offset;
5317         u64 num_bytes;
5318         u8 compression;
5319         u8 encryption;
5320         u16 other_encoding;
5321 };
5322
5323 static int is_cowonly_root(u64 root_objectid)
5324 {
5325         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5326             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5327             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5328             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5329             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5330             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5331                 return 1;
5332         return 0;
5333 }
5334
5335 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5336                                     struct btrfs_root *extent_root,
5337                                     struct btrfs_ref_path *ref_path,
5338                                     int first_time)
5339 {
5340         struct extent_buffer *leaf;
5341         struct btrfs_path *path;
5342         struct btrfs_extent_ref *ref;
5343         struct btrfs_key key;
5344         struct btrfs_key found_key;
5345         u64 bytenr;
5346         u32 nritems;
5347         int level;
5348         int ret = 1;
5349
5350         path = btrfs_alloc_path();
5351         if (!path)
5352                 return -ENOMEM;
5353
5354         if (first_time) {
5355                 ref_path->lowest_level = -1;
5356                 ref_path->current_level = -1;
5357                 ref_path->shared_level = -1;
5358                 goto walk_up;
5359         }
5360 walk_down:
5361         level = ref_path->current_level - 1;
5362         while (level >= -1) {
5363                 u64 parent;
5364                 if (level < ref_path->lowest_level)
5365                         break;
5366
5367                 if (level >= 0)
5368                         bytenr = ref_path->nodes[level];
5369                 else
5370                         bytenr = ref_path->extent_start;
5371                 BUG_ON(bytenr == 0);
5372
5373                 parent = ref_path->nodes[level + 1];
5374                 ref_path->nodes[level + 1] = 0;
5375                 ref_path->current_level = level;
5376                 BUG_ON(parent == 0);
5377
5378                 key.objectid = bytenr;
5379                 key.offset = parent + 1;
5380                 key.type = BTRFS_EXTENT_REF_KEY;
5381
5382                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5383                 if (ret < 0)
5384                         goto out;
5385                 BUG_ON(ret == 0);
5386
5387                 leaf = path->nodes[0];
5388                 nritems = btrfs_header_nritems(leaf);
5389                 if (path->slots[0] >= nritems) {
5390                         ret = btrfs_next_leaf(extent_root, path);
5391                         if (ret < 0)
5392                                 goto out;
5393                         if (ret > 0)
5394                                 goto next;
5395                         leaf = path->nodes[0];
5396                 }
5397
5398                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5399                 if (found_key.objectid == bytenr &&
5400                     found_key.type == BTRFS_EXTENT_REF_KEY) {
5401                         if (level < ref_path->shared_level)
5402                                 ref_path->shared_level = level;
5403                         goto found;
5404                 }
5405 next:
5406                 level--;
5407                 btrfs_release_path(extent_root, path);
5408                 cond_resched();
5409         }
5410         /* reached lowest level */
5411         ret = 1;
5412         goto out;
5413 walk_up:
5414         level = ref_path->current_level;
5415         while (level < BTRFS_MAX_LEVEL - 1) {
5416                 u64 ref_objectid;
5417
5418                 if (level >= 0)
5419                         bytenr = ref_path->nodes[level];
5420                 else
5421                         bytenr = ref_path->extent_start;
5422
5423                 BUG_ON(bytenr == 0);
5424
5425                 key.objectid = bytenr;
5426                 key.offset = 0;
5427                 key.type = BTRFS_EXTENT_REF_KEY;
5428
5429                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5430                 if (ret < 0)
5431                         goto out;
5432
5433                 leaf = path->nodes[0];
5434                 nritems = btrfs_header_nritems(leaf);
5435                 if (path->slots[0] >= nritems) {
5436                         ret = btrfs_next_leaf(extent_root, path);
5437                         if (ret < 0)
5438                                 goto out;
5439                         if (ret > 0) {
5440                                 /* the extent was freed by someone */
5441                                 if (ref_path->lowest_level == level)
5442                                         goto out;
5443                                 btrfs_release_path(extent_root, path);
5444                                 goto walk_down;
5445                         }
5446                         leaf = path->nodes[0];
5447                 }
5448
5449                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5450                 if (found_key.objectid != bytenr ||
5451                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
5452                         /* the extent was freed by someone */
5453                         if (ref_path->lowest_level == level) {
5454                                 ret = 1;
5455                                 goto out;
5456                         }
5457                         btrfs_release_path(extent_root, path);
5458                         goto walk_down;
5459                 }
5460 found:
5461                 ref = btrfs_item_ptr(leaf, path->slots[0],
5462                                 struct btrfs_extent_ref);
5463                 ref_objectid = btrfs_ref_objectid(leaf, ref);
5464                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5465                         if (first_time) {
5466                                 level = (int)ref_objectid;
5467                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
5468                                 ref_path->lowest_level = level;
5469                                 ref_path->current_level = level;
5470                                 ref_path->nodes[level] = bytenr;
5471                         } else {
5472                                 WARN_ON(ref_objectid != level);
5473                         }
5474                 } else {
5475                         WARN_ON(level != -1);
5476                 }
5477                 first_time = 0;
5478
5479                 if (ref_path->lowest_level == level) {
5480                         ref_path->owner_objectid = ref_objectid;
5481                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5482                 }
5483
5484                 /*
5485                  * the block is a tree root, or the block isn't in a
5486                  * reference counted tree.
5487                  */
5488                 if (found_key.objectid == found_key.offset ||
5489                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5490                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5491                         ref_path->root_generation =
5492                                 btrfs_ref_generation(leaf, ref);
5493                         if (level < 0) {
5494                                 /* special reference from the tree log */
5495                                 ref_path->nodes[0] = found_key.offset;
5496                                 ref_path->current_level = 0;
5497                         }
5498                         ret = 0;
5499                         goto out;
5500                 }
5501
5502                 level++;
5503                 BUG_ON(ref_path->nodes[level] != 0);
5504                 ref_path->nodes[level] = found_key.offset;
5505                 ref_path->current_level = level;
5506
5507                 /*
5508                  * the reference was created in the running transaction,
5509                  * no need to continue walking up.
5510                  */
5511                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5512                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5513                         ref_path->root_generation =
5514                                 btrfs_ref_generation(leaf, ref);
5515                         ret = 0;
5516                         goto out;
5517                 }
5518
5519                 btrfs_release_path(extent_root, path);
5520                 cond_resched();
5521         }
5522         /* reached max tree level, but no tree root found. */
5523         BUG();
5524 out:
5525         btrfs_free_path(path);
5526         return ret;
5527 }
5528
5529 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5530                                 struct btrfs_root *extent_root,
5531                                 struct btrfs_ref_path *ref_path,
5532                                 u64 extent_start)
5533 {
5534         memset(ref_path, 0, sizeof(*ref_path));
5535         ref_path->extent_start = extent_start;
5536
5537         return __next_ref_path(trans, extent_root, ref_path, 1);
5538 }
5539
5540 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5541                                struct btrfs_root *extent_root,
5542                                struct btrfs_ref_path *ref_path)
5543 {
5544         return __next_ref_path(trans, extent_root, ref_path, 0);
5545 }
5546
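/*
 * Look up the file extents of reloc_inode that cover the copy of the
 * relocated extent described by extent_key.  On success the resulting
 * disk_extent array is returned through 'extents'/'nr_extents'.  When
 * no_fragment is set and the data no longer fits in a single extent,
 * 1 is returned so the caller can fall back to another strategy.
 */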
5547 static noinline int get_new_locations(struct inode *reloc_inode,
5548                                       struct btrfs_key *extent_key,
5549                                       u64 offset, int no_fragment,
5550                                       struct disk_extent **extents,
5551                                       int *nr_extents)
5552 {
5553         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5554         struct btrfs_path *path;
5555         struct btrfs_file_extent_item *fi;
5556         struct extent_buffer *leaf;
5557         struct disk_extent *exts = *extents;
5558         struct btrfs_key found_key;
5559         u64 cur_pos;
5560         u64 last_byte;
5561         u32 nritems;
5562         int nr = 0;
5563         int max = *nr_extents;
5564         int ret;
5565
5566         WARN_ON(!no_fragment && *extents);
5567         if (!exts) {
5568                 max = 1;
5569                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5570                 if (!exts)
5571                         return -ENOMEM;
5572         }
5573
5574         path = btrfs_alloc_path();
5575         BUG_ON(!path);
5576
5577         cur_pos = extent_key->objectid - offset;
5578         last_byte = extent_key->objectid + extent_key->offset;
5579         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5580                                        cur_pos, 0);
5581         if (ret < 0)
5582                 goto out;
5583         if (ret > 0) {
5584                 ret = -ENOENT;
5585                 goto out;
5586         }
5587
5588         while (1) {
5589                 leaf = path->nodes[0];
5590                 nritems = btrfs_header_nritems(leaf);
5591                 if (path->slots[0] >= nritems) {
5592                         ret = btrfs_next_leaf(root, path);
5593                         if (ret < 0)
5594                                 goto out;
5595                         if (ret > 0)
5596                                 break;
5597                         leaf = path->nodes[0];
5598                 }
5599
5600                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5601                 if (found_key.offset != cur_pos ||
5602                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
5603                     found_key.objectid != reloc_inode->i_ino)
5604                         break;
5605
5606                 fi = btrfs_item_ptr(leaf, path->slots[0],
5607                                     struct btrfs_file_extent_item);
5608                 if (btrfs_file_extent_type(leaf, fi) !=
5609                     BTRFS_FILE_EXTENT_REG ||
5610                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5611                         break;
5612
5613                 if (nr == max) {
5614                         struct disk_extent *old = exts;
5615                         max *= 2;
5616                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                             if (!exts) {
                                     exts = old;
                                     ret = -ENOMEM;
                                     goto out;
                             }
5617                         memcpy(exts, old, sizeof(*exts) * nr);
5618                         if (old != *extents)
5619                                 kfree(old);
5620                 }
5621
5622                 exts[nr].disk_bytenr =
5623                         btrfs_file_extent_disk_bytenr(leaf, fi);
5624                 exts[nr].disk_num_bytes =
5625                         btrfs_file_extent_disk_num_bytes(leaf, fi);
5626                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5627                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5628                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5629                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5630                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5631                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5632                                                                            fi);
5633                 BUG_ON(exts[nr].offset > 0);
5634                 BUG_ON(exts[nr].compression || exts[nr].encryption);
5635                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5636
5637                 cur_pos += exts[nr].num_bytes;
5638                 nr++;
5639
5640                 if (cur_pos + offset >= last_byte)
5641                         break;
5642
5643                 if (no_fragment) {
5644                         ret = 1;
5645                         goto out;
5646                 }
5647                 path->slots[0]++;
5648         }
5649
5650         BUG_ON(cur_pos + offset > last_byte);
5651         if (cur_pos + offset < last_byte) {
5652                 ret = -ENOENT;
5653                 goto out;
5654         }
5655         ret = 0;
5656 out:
5657         btrfs_free_path(path);
5658         if (ret) {
5659                 if (exts != *extents)
5660                         kfree(exts);
5661         } else {
5662                 *extents = exts;
5663                 *nr_extents = nr;
5664         }
5665         return ret;
5666 }
5667
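/*
 * Find the file extent items in 'root' that still point at the extent
 * being relocated (extent_key) and rewrite them to point at the new
 * location(s) in new_extents.  The affected file range is locked in the
 * inode's io_tree while the pointers are switched, and the extent
 * reference counts are updated accordingly.
 */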
5668 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
5669                                         struct btrfs_root *root,
5670                                         struct btrfs_path *path,
5671                                         struct btrfs_key *extent_key,
5672                                         struct btrfs_key *leaf_key,
5673                                         struct btrfs_ref_path *ref_path,
5674                                         struct disk_extent *new_extents,
5675                                         int nr_extents)
5676 {
5677         struct extent_buffer *leaf;
5678         struct btrfs_file_extent_item *fi;
5679         struct inode *inode = NULL;
5680         struct btrfs_key key;
5681         u64 lock_start = 0;
5682         u64 lock_end = 0;
5683         u64 num_bytes;
5684         u64 ext_offset;
5685         u64 search_end = (u64)-1;
5686         u32 nritems;
5687         int nr_scanned = 0;
5688         int extent_locked = 0;
5689         int extent_type;
5690         int ret;
5691
5692         memcpy(&key, leaf_key, sizeof(key));
5693         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5694                 if (key.objectid < ref_path->owner_objectid ||
5695                     (key.objectid == ref_path->owner_objectid &&
5696                      key.type < BTRFS_EXTENT_DATA_KEY)) {
5697                         key.objectid = ref_path->owner_objectid;
5698                         key.type = BTRFS_EXTENT_DATA_KEY;
5699                         key.offset = 0;
5700                 }
5701         }
5702
5703         while (1) {
5704                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
5705                 if (ret < 0)
5706                         goto out;
5707
5708                 leaf = path->nodes[0];
5709                 nritems = btrfs_header_nritems(leaf);
5710 next:
5711                 if (extent_locked && ret > 0) {
5712                         /*
5713                          * the file extent item was modified by someone
5714                          * before the extent got locked.
5715                          */
5716                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5717                                       lock_end, GFP_NOFS);
5718                         extent_locked = 0;
5719                 }
5720
5721                 if (path->slots[0] >= nritems) {
5722                 if (++nr_scanned > 2)
5723                                 break;
5724
5725                         BUG_ON(extent_locked);
5726                         ret = btrfs_next_leaf(root, path);
5727                         if (ret < 0)
5728                                 goto out;
5729                         if (ret > 0)
5730                                 break;
5731                         leaf = path->nodes[0];
5732                         nritems = btrfs_header_nritems(leaf);
5733                 }
5734
5735                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5736
5737                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5738                         if ((key.objectid > ref_path->owner_objectid) ||
5739                             (key.objectid == ref_path->owner_objectid &&
5740                              key.type > BTRFS_EXTENT_DATA_KEY) ||
5741                             key.offset >= search_end)
5742                                 break;
5743                 }
5744
5745                 if (inode && key.objectid != inode->i_ino) {
5746                         BUG_ON(extent_locked);
5747                         btrfs_release_path(root, path);
5748                         mutex_unlock(&inode->i_mutex);
5749                         iput(inode);
5750                         inode = NULL;
5751                         continue;
5752                 }
5753
5754                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
5755                         path->slots[0]++;
5756                         ret = 1;
5757                         goto next;
5758                 }
5759                 fi = btrfs_item_ptr(leaf, path->slots[0],
5760                                     struct btrfs_file_extent_item);
5761                 extent_type = btrfs_file_extent_type(leaf, fi);
5762                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
5763                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
5764                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
5765                      extent_key->objectid)) {
5766                         path->slots[0]++;
5767                         ret = 1;
5768                         goto next;
5769                 }
5770
5771                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5772                 ext_offset = btrfs_file_extent_offset(leaf, fi);
5773
5774                 if (search_end == (u64)-1) {
5775                         search_end = key.offset - ext_offset +
5776                                 btrfs_file_extent_ram_bytes(leaf, fi);
5777                 }
5778
5779                 if (!extent_locked) {
5780                         lock_start = key.offset;
5781                         lock_end = lock_start + num_bytes - 1;
5782                 } else {
5783                         if (lock_start > key.offset ||
5784                             lock_end + 1 < key.offset + num_bytes) {
5785                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5786                                               lock_start, lock_end, GFP_NOFS);
5787                                 extent_locked = 0;
5788                         }
5789                 }
5790
5791                 if (!inode) {
5792                         btrfs_release_path(root, path);
5793
5794                         inode = btrfs_iget_locked(root->fs_info->sb,
5795                                                   key.objectid, root);
5796                         if (inode->i_state & I_NEW) {
5797                                 BTRFS_I(inode)->root = root;
5798                                 BTRFS_I(inode)->location.objectid =
5799                                         key.objectid;
5800                                 BTRFS_I(inode)->location.type =
5801                                         BTRFS_INODE_ITEM_KEY;
5802                                 BTRFS_I(inode)->location.offset = 0;
5803                                 btrfs_read_locked_inode(inode);
5804                                 unlock_new_inode(inode);
5805                         }
5806                         /*
5807                          * some code paths call btrfs_commit_transaction while
5808                          * holding the i_mutex, so we can't use mutex_lock
5809                          * here.
5810                          */
5811                         if (is_bad_inode(inode) ||
5812                             !mutex_trylock(&inode->i_mutex)) {
5813                                 iput(inode);
5814                                 inode = NULL;
5815                                 key.offset = (u64)-1;
5816                                 goto skip;
5817                         }
5818                 }
5819
5820                 if (!extent_locked) {
5821                         struct btrfs_ordered_extent *ordered;
5822
5823                         btrfs_release_path(root, path);
5824
5825                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5826                                     lock_end, GFP_NOFS);
5827                         ordered = btrfs_lookup_first_ordered_extent(inode,
5828                                                                     lock_end);
5829                         if (ordered &&
5830                             ordered->file_offset <= lock_end &&
5831                             ordered->file_offset + ordered->len > lock_start) {
5832                                 unlock_extent(&BTRFS_I(inode)->io_tree,
5833                                               lock_start, lock_end, GFP_NOFS);
5834                                 btrfs_start_ordered_extent(inode, ordered, 1);
5835                                 btrfs_put_ordered_extent(ordered);
5836                                 key.offset += num_bytes;
5837                                 goto skip;
5838                         }
5839                         if (ordered)
5840                                 btrfs_put_ordered_extent(ordered);
5841
5842                         extent_locked = 1;
5843                         continue;
5844                 }
5845
5846                 if (nr_extents == 1) {
5847                         /* update extent pointer in place */
5848                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
5849                                                 new_extents[0].disk_bytenr);
5850                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5851                                                 new_extents[0].disk_num_bytes);
5852                         btrfs_mark_buffer_dirty(leaf);
5853
5854                         btrfs_drop_extent_cache(inode, key.offset,
5855                                                 key.offset + num_bytes - 1, 0);
5856
5857                         ret = btrfs_inc_extent_ref(trans, root,
5858                                                 new_extents[0].disk_bytenr,
5859                                                 new_extents[0].disk_num_bytes,
5860                                                 leaf->start,
5861                                                 root->root_key.objectid,
5862                                                 trans->transid,
5863                                                 key.objectid);
5864                         BUG_ON(ret);
5865
5866                         ret = btrfs_free_extent(trans, root,
5867                                                 extent_key->objectid,
5868                                                 extent_key->offset,
5869                                                 leaf->start,
5870                                                 btrfs_header_owner(leaf),
5871                                                 btrfs_header_generation(leaf),
5872                                                 key.objectid, 0);
5873                         BUG_ON(ret);
5874
5875                         btrfs_release_path(root, path);
5876                         key.offset += num_bytes;
5877                 } else {
5878                         BUG_ON(1);
5879 #if 0
5880                         u64 alloc_hint;
5881                         u64 extent_len;
5882                         int i;
5883                         /*
5884                          * drop the old extent pointer first, then insert the
5885                          * new pointers one by one
5886                          */
5887                         btrfs_release_path(root, path);
5888                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
5889                                                  key.offset + num_bytes,
5890                                                  key.offset, &alloc_hint);
5891                         BUG_ON(ret);
5892
5893                         for (i = 0; i < nr_extents; i++) {
5894                                 if (ext_offset >= new_extents[i].num_bytes) {
5895                                         ext_offset -= new_extents[i].num_bytes;
5896                                         continue;
5897                                 }
5898                                 extent_len = min(new_extents[i].num_bytes -
5899                                                  ext_offset, num_bytes);
5900
5901                                 ret = btrfs_insert_empty_item(trans, root,
5902                                                               path, &key,
5903                                                               sizeof(*fi));
5904                                 BUG_ON(ret);
5905
5906                                 leaf = path->nodes[0];
5907                                 fi = btrfs_item_ptr(leaf, path->slots[0],
5908                                                 struct btrfs_file_extent_item);
5909                                 btrfs_set_file_extent_generation(leaf, fi,
5910                                                         trans->transid);
5911                                 btrfs_set_file_extent_type(leaf, fi,
5912                                                         BTRFS_FILE_EXTENT_REG);
5913                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5914                                                 new_extents[i].disk_bytenr);
5915                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5916                                                 new_extents[i].disk_num_bytes);
5917                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
5918                                                 new_extents[i].ram_bytes);
5919
5920                                 btrfs_set_file_extent_compression(leaf, fi,
5921                                                 new_extents[i].compression);
5922                                 btrfs_set_file_extent_encryption(leaf, fi,
5923                                                 new_extents[i].encryption);
5924                                 btrfs_set_file_extent_other_encoding(leaf, fi,
5925                                                 new_extents[i].other_encoding);
5926
5927                                 btrfs_set_file_extent_num_bytes(leaf, fi,
5928                                                         extent_len);
5929                                 ext_offset += new_extents[i].offset;
5930                                 btrfs_set_file_extent_offset(leaf, fi,
5931                                                         ext_offset);
5932                                 btrfs_mark_buffer_dirty(leaf);
5933
5934                                 btrfs_drop_extent_cache(inode, key.offset,
5935                                                 key.offset + extent_len - 1, 0);
5936
5937                                 ret = btrfs_inc_extent_ref(trans, root,
5938                                                 new_extents[i].disk_bytenr,
5939                                                 new_extents[i].disk_num_bytes,
5940                                                 leaf->start,
5941                                                 root->root_key.objectid,
5942                                                 trans->transid, key.objectid);
5943                                 BUG_ON(ret);
5944                                 btrfs_release_path(root, path);
5945
5946                                 inode_add_bytes(inode, extent_len);
5947
5948                                 ext_offset = 0;
5949                                 num_bytes -= extent_len;
5950                                 key.offset += extent_len;
5951
5952                                 if (num_bytes == 0)
5953                                         break;
5954                         }
5955                         BUG_ON(i >= nr_extents);
5956 #endif
5957                 }
5958
5959                 if (extent_locked) {
5960                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5961                                       lock_end, GFP_NOFS);
5962                         extent_locked = 0;
5963                 }
5964 skip:
5965                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
5966                     key.offset >= search_end)
5967                         break;
5968
5969                 cond_resched();
5970         }
5971         ret = 0;
5972 out:
5973         btrfs_release_path(root, path);
5974         if (inode) {
5975                 mutex_unlock(&inode->i_mutex);
5976                 if (extent_locked) {
5977                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5978                                       lock_end, GFP_NOFS);
5979                 }
5980                 iput(inode);
5981         }
5982         return ret;
5983 }
5984
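/*
 * When a leaf of a reloc tree is COWed, carry the leaf ref cache entry
 * of the original leaf (orig_start) over to the new leaf so the ref
 * cache stays usable for the relocated block.
 */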
5985 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
5986                                struct btrfs_root *root,
5987                                struct extent_buffer *buf, u64 orig_start)
5988 {
5989         int level;
5990         int ret;
5991
5992         BUG_ON(btrfs_header_generation(buf) != trans->transid);
5993         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5994
5995         level = btrfs_header_level(buf);
5996         if (level == 0) {
5997                 struct btrfs_leaf_ref *ref;
5998                 struct btrfs_leaf_ref *orig_ref;
5999
6000                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6001                 if (!orig_ref)
6002                         return -ENOENT;
6003
6004                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6005                 if (!ref) {
6006                         btrfs_free_leaf_ref(root, orig_ref);
6007                         return -ENOMEM;
6008                 }
6009
6010                 ref->nritems = orig_ref->nritems;
6011                 memcpy(ref->extents, orig_ref->extents,
6012                         sizeof(ref->extents[0]) * ref->nritems);
6013
6014                 btrfs_free_leaf_ref(root, orig_ref);
6015
6016                 ref->root_gen = trans->transid;
6017                 ref->bytenr = buf->start;
6018                 ref->owner = btrfs_header_owner(buf);
6019                 ref->generation = btrfs_header_generation(buf);
6020
6021                 ret = btrfs_add_leaf_ref(root, ref, 0);
6022                 WARN_ON(ret);
6023                 btrfs_free_leaf_ref(root, ref);
6024         }
6025         return 0;
6026 }
6027
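/*
 * Drop the cached extent mappings for every file extent referenced by
 * 'leaf', so that readers of the affected inodes in target_root go back
 * to the fs tree and pick up the relocated extent pointers.
 */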
6028 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6029                                         struct extent_buffer *leaf,
6030                                         struct btrfs_block_group_cache *group,
6031                                         struct btrfs_root *target_root)
6032 {
6033         struct btrfs_key key;
6034         struct inode *inode = NULL;
6035         struct btrfs_file_extent_item *fi;
6036         u64 num_bytes;
6037         u64 skip_objectid = 0;
6038         u32 nritems;
6039         u32 i;
6040
6041         nritems = btrfs_header_nritems(leaf);
6042         for (i = 0; i < nritems; i++) {
6043                 btrfs_item_key_to_cpu(leaf, &key, i);
6044                 if (key.objectid == skip_objectid ||
6045                     key.type != BTRFS_EXTENT_DATA_KEY)
6046                         continue;
6047                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6048                 if (btrfs_file_extent_type(leaf, fi) ==
6049                     BTRFS_FILE_EXTENT_INLINE)
6050                         continue;
6051                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6052                         continue;
6053                 if (!inode || inode->i_ino != key.objectid) {
6054                         iput(inode);
6055                         inode = btrfs_ilookup(target_root->fs_info->sb,
6056                                               key.objectid, target_root, 1);
6057                 }
6058                 if (!inode) {
6059                         skip_objectid = key.objectid;
6060                         continue;
6061                 }
6062                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6063
6064                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6065                             key.offset + num_bytes - 1, GFP_NOFS);
6066                 btrfs_drop_extent_cache(inode, key.offset,
6067                                         key.offset + num_bytes - 1, 1);
6068                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6069                               key.offset + num_bytes - 1, GFP_NOFS);
6070                 cond_resched();
6071         }
6072         iput(inode);
6073         return 0;
6074 }
6075
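/*
 * Rewrite, in place, every file extent pointer in 'leaf' that falls
 * inside the block group being relocated: look up the new copy of the
 * data via get_new_locations(), update the item and the leaf ref cache,
 * then adjust the extent reference counts.
 */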
6076 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6077                                         struct btrfs_root *root,
6078                                         struct extent_buffer *leaf,
6079                                         struct btrfs_block_group_cache *group,
6080                                         struct inode *reloc_inode)
6081 {
6082         struct btrfs_key key;
6083         struct btrfs_key extent_key;
6084         struct btrfs_file_extent_item *fi;
6085         struct btrfs_leaf_ref *ref;
6086         struct disk_extent *new_extent;
6087         u64 bytenr;
6088         u64 num_bytes;
6089         u32 nritems;
6090         u32 i;
6091         int ext_index;
6092         int nr_extent;
6093         int ret;
6094
6095         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6096         BUG_ON(!new_extent);
6097
6098         ref = btrfs_lookup_leaf_ref(root, leaf->start);
6099         BUG_ON(!ref);
6100
6101         ext_index = -1;
6102         nritems = btrfs_header_nritems(leaf);
6103         for (i = 0; i < nritems; i++) {
6104                 btrfs_item_key_to_cpu(leaf, &key, i);
6105                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6106                         continue;
6107                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6108                 if (btrfs_file_extent_type(leaf, fi) ==
6109                     BTRFS_FILE_EXTENT_INLINE)
6110                         continue;
6111                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6112                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6113                 if (bytenr == 0)
6114                         continue;
6115
6116                 ext_index++;
6117                 if (bytenr >= group->key.objectid + group->key.offset ||
6118                     bytenr + num_bytes <= group->key.objectid)
6119                         continue;
6120
6121                 extent_key.objectid = bytenr;
6122                 extent_key.offset = num_bytes;
6123                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6124                 nr_extent = 1;
6125                 ret = get_new_locations(reloc_inode, &extent_key,
6126                                         group->key.objectid, 1,
6127                                         &new_extent, &nr_extent);
6128                 if (ret > 0)
6129                         continue;
6130                 BUG_ON(ret < 0);
6131
6132                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6133                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6134                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6135                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6136
6137                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6138                                                 new_extent->disk_bytenr);
6139                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6140                                                 new_extent->disk_num_bytes);
6141                 btrfs_mark_buffer_dirty(leaf);
6142
6143                 ret = btrfs_inc_extent_ref(trans, root,
6144                                         new_extent->disk_bytenr,
6145                                         new_extent->disk_num_bytes,
6146                                         leaf->start,
6147                                         root->root_key.objectid,
6148                                         trans->transid, key.objectid);
6149                 BUG_ON(ret);
6150
6151                 ret = btrfs_free_extent(trans, root,
6152                                         bytenr, num_bytes, leaf->start,
6153                                         btrfs_header_owner(leaf),
6154                                         btrfs_header_generation(leaf),
6155                                         key.objectid, 0);
6156                 BUG_ON(ret);
6157                 cond_resched();
6158         }
6159         kfree(new_extent);
6160         BUG_ON(ext_index + 1 != ref->nritems);
6161         btrfs_free_leaf_ref(root, ref);
6162         return 0;
6163 }
6164
6165 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6166                           struct btrfs_root *root)
6167 {
6168         struct btrfs_root *reloc_root;
6169         int ret;
6170
6171         if (root->reloc_root) {
6172                 reloc_root = root->reloc_root;
6173                 root->reloc_root = NULL;
6174                 list_add(&reloc_root->dead_list,
6175                          &root->fs_info->dead_reloc_roots);
6176
6177                 btrfs_set_root_bytenr(&reloc_root->root_item,
6178                                       reloc_root->node->start);
6179                 btrfs_set_root_level(&reloc_root->root_item,
6180                                      btrfs_header_level(reloc_root->node));
6181                 memset(&reloc_root->root_item.drop_progress, 0,
6182                         sizeof(struct btrfs_disk_key));
6183                 reloc_root->root_item.drop_level = 0;
6184
6185                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6186                                         &reloc_root->root_key,
6187                                         &reloc_root->root_item);
6188                 BUG_ON(ret);
6189         }
6190         return 0;
6191 }
6192
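/*
 * Dispose of the reloc roots queued on fs_info->dead_reloc_roots: drop
 * each snapshot, restarting the transaction whenever btrfs_drop_snapshot
 * returns -EAGAIN, then delete its root item from the tree root.
 */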
6193 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6194 {
6195         struct btrfs_trans_handle *trans;
6196         struct btrfs_root *reloc_root;
6197         struct btrfs_root *prev_root = NULL;
6198         struct list_head dead_roots;
6199         int ret;
6200         unsigned long nr;
6201
6202         INIT_LIST_HEAD(&dead_roots);
6203         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6204
6205         while (!list_empty(&dead_roots)) {
6206                 reloc_root = list_entry(dead_roots.prev,
6207                                         struct btrfs_root, dead_list);
6208                 list_del_init(&reloc_root->dead_list);
6209
6210                 BUG_ON(reloc_root->commit_root != NULL);
6211                 while (1) {
6212                         trans = btrfs_join_transaction(root, 1);
6213                         BUG_ON(!trans);
6214
6215                         mutex_lock(&root->fs_info->drop_mutex);
6216                         ret = btrfs_drop_snapshot(trans, reloc_root);
6217                         if (ret != -EAGAIN)
6218                                 break;
6219                         mutex_unlock(&root->fs_info->drop_mutex);
6220
6221                         nr = trans->blocks_used;
6222                         ret = btrfs_end_transaction(trans, root);
6223                         BUG_ON(ret);
6224                         btrfs_btree_balance_dirty(root, nr);
6225                 }
6226
6227                 free_extent_buffer(reloc_root->node);
6228
6229                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6230                                      &reloc_root->root_key);
6231                 BUG_ON(ret);
6232                 mutex_unlock(&root->fs_info->drop_mutex);
6233
6234                 nr = trans->blocks_used;
6235                 ret = btrfs_end_transaction(trans, root);
6236                 BUG_ON(ret);
6237                 btrfs_btree_balance_dirty(root, nr);
6238
6239                 kfree(prev_root);
6240                 prev_root = reloc_root;
6241         }
6242         if (prev_root) {
6243                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6244                 kfree(prev_root);
6245         }
6246         return 0;
6247 }
6248
6249 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6250 {
6251         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6252         return 0;
6253 }
6254
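/*
 * Finish reloc tree cleanup left over from an interrupted balance: if
 * dead reloc roots are found, commit a transaction to finish dropping
 * them, then run orphan cleanup on the data relocation tree.
 */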
6255 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6256 {
6257         struct btrfs_root *reloc_root;
6258         struct btrfs_trans_handle *trans;
6259         struct btrfs_key location;
6260         int found;
6261         int ret;
6262
6263         mutex_lock(&root->fs_info->tree_reloc_mutex);
6264         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6265         BUG_ON(ret);
6266         found = !list_empty(&root->fs_info->dead_reloc_roots);
6267         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6268
6269         if (found) {
6270                 trans = btrfs_start_transaction(root, 1);
6271                 BUG_ON(!trans);
6272                 ret = btrfs_commit_transaction(trans, root);
6273                 BUG_ON(ret);
6274         }
6275
6276         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6277         location.offset = (u64)-1;
6278         location.type = BTRFS_ROOT_ITEM_KEY;
6279
6280         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6281         BUG_ON(!reloc_root);
6282         btrfs_orphan_cleanup(reloc_root);
6283         return 0;
6284 }
6285
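/*
 * Create the reloc tree for 'root' if it doesn't exist yet: copy the
 * subvol's commit root into a new tree whose key is
 * (BTRFS_TREE_RELOC_OBJECTID, ROOT_ITEM, subvol objectid) and hook it
 * up as root->reloc_root.
 */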
6286 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6287                                     struct btrfs_root *root)
6288 {
6289         struct btrfs_root *reloc_root;
6290         struct extent_buffer *eb;
6291         struct btrfs_root_item *root_item;
6292         struct btrfs_key root_key;
6293         int ret;
6294
6295         BUG_ON(!root->ref_cows);
6296         if (root->reloc_root)
6297                 return 0;
6298
6299         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6300         BUG_ON(!root_item);
6301
6302         ret = btrfs_copy_root(trans, root, root->commit_root,
6303                               &eb, BTRFS_TREE_RELOC_OBJECTID);
6304         BUG_ON(ret);
6305
6306         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6307         root_key.offset = root->root_key.objectid;
6308         root_key.type = BTRFS_ROOT_ITEM_KEY;
6309
6310         memcpy(root_item, &root->root_item, sizeof(*root_item));
6311         btrfs_set_root_refs(root_item, 0);
6312         btrfs_set_root_bytenr(root_item, eb->start);
6313         btrfs_set_root_level(root_item, btrfs_header_level(eb));
6314         btrfs_set_root_generation(root_item, trans->transid);
6315
6316         btrfs_tree_unlock(eb);
6317         free_extent_buffer(eb);
6318
6319         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6320                                 &root_key, root_item);
6321         BUG_ON(ret);
6322         kfree(root_item);
6323
6324         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6325                                                  &root_key);
6326         BUG_ON(!reloc_root);
6327         reloc_root->last_trans = trans->transid;
6328         reloc_root->commit_root = NULL;
6329         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6330
6331         root->reloc_root = reloc_root;
6332         return 0;
6333 }
6334
6335 /*
6336  * Core function of space balance.
6337  *
6338  * The idea is to use reloc trees to relocate tree blocks in reference
6339  * counted roots. There is one reloc tree for each subvol, and all
6340  * reloc trees share the same root key objectid. Reloc trees are
6341  * snapshots of the latest committed roots of subvols (root->commit_root).
6342  *
6343  * To relocate a tree block referenced by a subvol, there are two steps:
6344  * COW the block through the subvol's reloc tree, then update the block
6345  * pointer in the subvol to point to the new block. Since all reloc trees
6346  * share the same root key objectid, special handling for tree blocks
6347  * owned by them is easy. Once a tree block has been COWed in one reloc
6348  * tree, we can use the resulting new block directly when the same block
6349  * needs to be COWed again through another reloc tree. In this way,
6350  * relocated tree blocks are shared between reloc trees, so they are
6351  * also shared between subvols.
6352  */
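/*
 * Illustrative sketch (not an exact call sequence) of how the two steps
 * play out for one reference path in relocate_one_path() below:
 *
 *     init_reloc_tree(trans, root);               snapshot commit_root
 *     btrfs_search_slot(trans, reloc_root, ...);  COW the blocks on the
 *                                                 path in the reloc tree
 *     btrfs_merge_path(trans, root, ...);         swap the new blocks
 *                                                 into the subvol's tree
 */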
6353 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6354                                       struct btrfs_root *root,
6355                                       struct btrfs_path *path,
6356                                       struct btrfs_key *first_key,
6357                                       struct btrfs_ref_path *ref_path,
6358                                       struct btrfs_block_group_cache *group,
6359                                       struct inode *reloc_inode)
6360 {
6361         struct btrfs_root *reloc_root;
6362         struct extent_buffer *eb = NULL;
6363         struct btrfs_key *keys;
6364         u64 *nodes;
6365         int level;
6366         int shared_level;
6367         int lowest_level = 0;
6368         int ret;
6369
6370         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6371                 lowest_level = ref_path->owner_objectid;
6372
6373         if (!root->ref_cows) {
6374                 path->lowest_level = lowest_level;
6375                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6376                 BUG_ON(ret < 0);
6377                 path->lowest_level = 0;
6378                 btrfs_release_path(root, path);
6379                 return 0;
6380         }
6381
6382         mutex_lock(&root->fs_info->tree_reloc_mutex);
6383         ret = init_reloc_tree(trans, root);
6384         BUG_ON(ret);
6385         reloc_root = root->reloc_root;
6386
6387         shared_level = ref_path->shared_level;
6388         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6389
6390         keys = ref_path->node_keys;
6391         nodes = ref_path->new_nodes;
6392         memset(&keys[shared_level + 1], 0,
6393                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6394         memset(&nodes[shared_level + 1], 0,
6395                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6396
6397         if (nodes[lowest_level] == 0) {
6398                 path->lowest_level = lowest_level;
6399                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6400                                         0, 1);
6401                 BUG_ON(ret);
6402                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6403                         eb = path->nodes[level];
6404                         if (!eb || eb == reloc_root->node)
6405                                 break;
6406                         nodes[level] = eb->start;
6407                         if (level == 0)
6408                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6409                         else
6410                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6411                 }
6412                 if (nodes[0] &&
6413                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6414                         eb = path->nodes[0];
6415                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
6416                                                       group, reloc_inode);
6417                         BUG_ON(ret);
6418                 }
6419                 btrfs_release_path(reloc_root, path);
6420         } else {
6421                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6422                                        lowest_level);
6423                 BUG_ON(ret);
6424         }
6425
6426         /*
6427          * replace tree blocks in the fs tree with tree blocks in
6428          * the reloc tree.
6429          */
6430         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6431         BUG_ON(ret < 0);
6432
6433         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6434                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6435                                         0, 0);
6436                 BUG_ON(ret);
6437                 extent_buffer_get(path->nodes[0]);
6438                 eb = path->nodes[0];
6439                 btrfs_release_path(reloc_root, path);
6440                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6441                 BUG_ON(ret);
6442                 free_extent_buffer(eb);
6443         }
6444
6445         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6446         path->lowest_level = 0;
6447         return 0;
6448 }
6449
6450 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6451                                         struct btrfs_root *root,
6452                                         struct btrfs_path *path,
6453                                         struct btrfs_key *first_key,
6454                                         struct btrfs_ref_path *ref_path)
6455 {
6456         int ret;
6457
6458         ret = relocate_one_path(trans, root, path, first_key,
6459                                 ref_path, NULL, NULL);
6460         BUG_ON(ret);
6461
6462         return 0;
6463 }
6464
6465 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
6466                                     struct btrfs_root *extent_root,
6467                                     struct btrfs_path *path,
6468                                     struct btrfs_key *extent_key)
6469 {
6470         int ret;
6471
6472         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6473         if (ret)
6474                 goto out;
6475         ret = btrfs_del_item(trans, extent_root, path);
6476 out:
6477         btrfs_release_path(extent_root, path);
6478         return ret;
6479 }
6480
6481 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
6482                                                 struct btrfs_ref_path *ref_path)
6483 {
6484         struct btrfs_key root_key;
6485
6486         root_key.objectid = ref_path->root_objectid;
6487         root_key.type = BTRFS_ROOT_ITEM_KEY;
6488         if (is_cowonly_root(ref_path->root_objectid))
6489                 root_key.offset = 0;
6490         else
6491                 root_key.offset = (u64)-1;
6492
6493         return btrfs_read_fs_root_no_name(fs_info, &root_key);
6494 }
6495
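/*
 * Relocate one extent out of the block group being shrunk.  Each
 * reference path of the extent is processed in turn: data extents are
 * copied into reloc_inode on pass 0, updated through the reloc trees on
 * pass 1, and rewritten directly via replace_one_extent() as a last
 * resort; tree blocks are relocated through relocate_tree_block().
 */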
6496 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
6497                                         struct btrfs_path *path,
6498                                         struct btrfs_key *extent_key,
6499                                         struct btrfs_block_group_cache *group,
6500                                         struct inode *reloc_inode, int pass)
6501 {
6502         struct btrfs_trans_handle *trans;
6503         struct btrfs_root *found_root;
6504         struct btrfs_ref_path *ref_path = NULL;
6505         struct disk_extent *new_extents = NULL;
6506         int nr_extents = 0;
6507         int loops;
6508         int ret;
6509         int level;
6510         struct btrfs_key first_key;
6511         u64 prev_block = 0;
6512
6513
6514         trans = btrfs_start_transaction(extent_root, 1);
6515         BUG_ON(!trans);
6516
6517         if (extent_key->objectid == 0) {
6518                 ret = del_extent_zero(trans, extent_root, path, extent_key);
6519                 goto out;
6520         }
6521
6522         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
6523         if (!ref_path) {
6524                 ret = -ENOMEM;
6525                 goto out;
6526         }
6527
6528         for (loops = 0; ; loops++) {
6529                 if (loops == 0) {
6530                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
6531                                                    extent_key->objectid);
6532                 } else {
6533                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
6534                 }
6535                 if (ret < 0)
6536                         goto out;
6537                 if (ret > 0)
6538                         break;
6539
6540                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6541                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
6542                         continue;
6543
6544                 found_root = read_ref_root(extent_root->fs_info, ref_path);
6545                 BUG_ON(!found_root);
6546                 /*
6547                  * for reference counted trees, only process reference paths
6548                  * rooted at the latest committed root.
6549                  */
6550                 if (found_root->ref_cows &&
6551                     ref_path->root_generation != found_root->root_key.offset)
6552                         continue;
6553
6554                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6555                         if (pass == 0) {
6556                                 /*
6557                                  * copy data extents to new locations
6558                                  */
6559                                 u64 group_start = group->key.objectid;
6560                                 ret = relocate_data_extent(reloc_inode,
6561                                                            extent_key,
6562                                                            group_start);
6563                                 if (ret < 0)
6564                                         goto out;
6565                                 break;
6566                         }
6567                         level = 0;
6568                 } else {
6569                         level = ref_path->owner_objectid;
6570                 }
6571
6572                 if (prev_block != ref_path->nodes[level]) {
6573                         struct extent_buffer *eb;
6574                         u64 block_start = ref_path->nodes[level];
6575                         u64 block_size = btrfs_level_size(found_root, level);
6576
6577                         eb = read_tree_block(found_root, block_start,
6578                                              block_size, 0);
6579                         btrfs_tree_lock(eb);
6580                         BUG_ON(level != btrfs_header_level(eb));
6581
6582                         if (level == 0)
6583                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
6584                         else
6585                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
6586
6587                         btrfs_tree_unlock(eb);
6588                         free_extent_buffer(eb);
6589                         prev_block = block_start;
6590                 }
6591
6592                 mutex_lock(&extent_root->fs_info->trans_mutex);
6593                 btrfs_record_root_in_trans(found_root);
6594                 mutex_unlock(&extent_root->fs_info->trans_mutex);
6595                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6596                         /*
6597                          * try to update data extent references while
6598                          * keeping metadata shared between snapshots.
6599                          */
6600                         if (pass == 1) {
6601                                 ret = relocate_one_path(trans, found_root,
6602                                                 path, &first_key, ref_path,
6603                                                 group, reloc_inode);
6604                                 if (ret < 0)
6605                                         goto out;
6606                                 continue;
6607                         }
6608                         /*
6609                          * use the fallback method to process the remaining
6610                          * references.
6611                          */
6612                         if (!new_extents) {
6613                                 u64 group_start = group->key.objectid;
6614                                 new_extents = kmalloc(sizeof(*new_extents),
6615                                                       GFP_NOFS);
                                     if (!new_extents) {
                                             ret = -ENOMEM;
                                             goto out;
                                     }
6616                                 nr_extents = 1;
6617                                 ret = get_new_locations(reloc_inode,
6618                                                         extent_key,
6619                                                         group_start, 1,
6620                                                         &new_extents,
6621                                                         &nr_extents);
6622                                 if (ret)
6623                                         goto out;
6624                         }
6625                         ret = replace_one_extent(trans, found_root,
6626                                                 path, extent_key,
6627                                                 &first_key, ref_path,
6628                                                 new_extents, nr_extents);
6629                 } else {
6630                         ret = relocate_tree_block(trans, found_root, path,
6631                                                   &first_key, ref_path);
6632                 }
6633                 if (ret < 0)
6634                         goto out;
6635         }
6636         ret = 0;
6637 out:
6638         btrfs_end_transaction(trans, extent_root);
6639         kfree(new_extents);
6640         kfree(ref_path);
6641         return ret;
6642 }
6643 #endif
6644
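     /*
      * pick the raid profile that still makes sense for the current number
      * of rw devices.  With a single device, raid0 falls back to plain
      * single-device chunks and raid1/raid10 become dup; with several
      * devices, dup is promoted to raid1 and single-device chunks to raid0.
      * Profiles that already fit the device count are returned unchanged.
      */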
6645 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6646 {
6647         u64 num_devices;
6648         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6649                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6650
6651         num_devices = root->fs_info->fs_devices->rw_devices;
6652         if (num_devices == 1) {
6653                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6654                 stripped = flags & ~stripped;
6655
6656                 /* turn raid0 into single device chunks */
6657                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6658                         return stripped;
6659
6660                 /* turn mirroring into duplication */
6661                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6662                              BTRFS_BLOCK_GROUP_RAID10))
6663                         return stripped | BTRFS_BLOCK_GROUP_DUP;
6664                 return flags;
6665         } else {
6666                 /* they already had raid on here, just return */
6667                 if (flags & stripped)
6668                         return flags;
6669
6670                 stripped |= BTRFS_BLOCK_GROUP_DUP;
6671                 stripped = flags & ~stripped;
6672
6673                 /* switch duplicated blocks to raid1 */
6674                 if (flags & BTRFS_BLOCK_GROUP_DUP)
6675                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
6676
6677                 /* turn single device chunks into raid0 */
6678                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6679         }
6680         return flags;
6681 }
6682
6683 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
6684                      struct btrfs_block_group_cache *shrink_block_group,
6685                      int force)
6686 {
6687         struct btrfs_trans_handle *trans;
6688         u64 new_alloc_flags;
6689         u64 calc;
6690
6691         spin_lock(&shrink_block_group->lock);
6692         if (btrfs_block_group_used(&shrink_block_group->item) +
6693             shrink_block_group->reserved > 0) {
6694                 spin_unlock(&shrink_block_group->lock);
6695
6696                 trans = btrfs_start_transaction(root, 1);
6697                 spin_lock(&shrink_block_group->lock);
6698
6699                 new_alloc_flags = update_block_group_flags(root,
6700                                                    shrink_block_group->flags);
6701                 if (new_alloc_flags != shrink_block_group->flags) {
6702                         calc =
6703                              btrfs_block_group_used(&shrink_block_group->item);
6704                 } else {
6705                         calc = shrink_block_group->key.offset;
6706                 }
6707                 spin_unlock(&shrink_block_group->lock);
6708
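             /*
              * pad the request by 2MB, the same slop the other
              * do_chunk_alloc() callers use, presumably so a minimally
              * sized allocation still leaves some headroom in the chunk.
              */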
6709                 do_chunk_alloc(trans, root->fs_info->extent_root,
6710                                calc + 2 * 1024 * 1024, new_alloc_flags, force);
6711
6712                 btrfs_end_transaction(trans, root);
6713         } else
6714                 spin_unlock(&shrink_block_group->lock);
6715         return 0;
6716 }
6717
6718
6719 int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
6720                                          struct btrfs_block_group_cache *group)
6722 {
6723         __alloc_chunk_for_shrink(root, group, 1);
6724         set_block_group_readonly(group);
6725         return 0;
6726 }
6727
6728 /*
6729  * checks to see if it's even possible to relocate this block group.
6730  *
6731  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
6732  * ok to go ahead and try.
6733  */
6734 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
6735 {
6736         struct btrfs_block_group_cache *block_group;
6737         struct btrfs_space_info *space_info;
6738         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
6739         struct btrfs_device *device;
6740         int full = 0;
6741         int ret = 0;
6742
6743         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
6744
6745         /* odd, couldn't find the block group, leave it alone */
6746         if (!block_group)
6747                 return -1;
6748
6749         /* no bytes used, we're good */
6750         if (!btrfs_block_group_used(&block_group->item))
6751                 goto out;
6752
6753         space_info = block_group->space_info;
6754         spin_lock(&space_info->lock);
6755
6756         full = space_info->full;
6757
6758         /*
6759          * if this is the last block group we have in this space, we can't
6760          * relocate it unless we're able to allocate a new chunk below.
6761          *
6762          * Otherwise, we need to make sure we have room in the space to handle
6763          * all of the extents from this block group.  If we can, we're good.
6764          */
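         /*
          * e.g. with 10GB total, 7GB already used/reserved/pinned/readonly
          * and a block group carrying 512MB of used extents, 7.5GB < 10GB,
          * so the extents fit elsewhere and relocation can proceed.
          */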
6765         if ((space_info->total_bytes != block_group->key.offset) &&
6766            (space_info->bytes_used + space_info->bytes_reserved +
6767             space_info->bytes_pinned + space_info->bytes_readonly +
6768             btrfs_block_group_used(&block_group->item) <
6769             space_info->total_bytes)) {
6770                 spin_unlock(&space_info->lock);
6771                 goto out;
6772         }
6773         spin_unlock(&space_info->lock);
6774
6775         /*
6776          * ok we don't have enough space, but maybe we have free space on our
6777          * devices to allocate new chunks for relocation, so loop through our
6778          * alloc devices and guess if we have enough space.  However, if we
6779          * were marked as full, then we know there aren't enough chunks, and we
6780          * can just return.
6781          */
6782         ret = -1;
6783         if (full)
6784                 goto out;
6785
6786         mutex_lock(&root->fs_info->chunk_mutex);
6787         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
6788                 u64 min_free = btrfs_block_group_used(&block_group->item);
6789                 u64 dev_offset, max_avail;
6790
6791                 /*
6792                  * check to make sure we can actually find a chunk with enough
6793                  * space to fit our block group in.
6794                  */
6795                 if (device->total_bytes > device->bytes_used + min_free) {
6796                         ret = find_free_dev_extent(NULL, device, min_free,
6797                                                    &dev_offset, &max_avail);
6798                         if (!ret)
6799                                 break;
6800                         ret = -1;
6801                 }
6802         }
6803         mutex_unlock(&root->fs_info->chunk_mutex);
6804 out:
6805         btrfs_put_block_group(block_group);
6806         return ret;
6807 }
6808
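     /*
      * scan forward from *key for the first BLOCK_GROUP_ITEM key.  Returns 0
      * with the path pointing at the item when one is found, a value > 0 once
      * the tree has been exhausted, or a negative errno if the search itself
      * fails.
      */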
6809 static int find_first_block_group(struct btrfs_root *root,
6810                 struct btrfs_path *path, struct btrfs_key *key)
6811 {
6812         int ret = 0;
6813         struct btrfs_key found_key;
6814         struct extent_buffer *leaf;
6815         int slot;
6816
6817         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6818         if (ret < 0)
6819                 goto out;
6820
6821         while (1) {
6822                 slot = path->slots[0];
6823                 leaf = path->nodes[0];
6824                 if (slot >= btrfs_header_nritems(leaf)) {
6825                         ret = btrfs_next_leaf(root, path);
6826                         if (ret == 0)
6827                                 continue;
6828                         if (ret < 0)
6829                                 goto out;
6830                         break;
6831                 }
6832                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6833
6834                 if (found_key.objectid >= key->objectid &&
6835                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6836                         ret = 0;
6837                         goto out;
6838                 }
6839                 path->slots[0]++;
6840         }
6842 out:
6843         return ret;
6844 }
6845
6846 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6847 {
6848         struct btrfs_block_group_cache *block_group;
6849         struct btrfs_space_info *space_info;
6850         struct btrfs_caching_control *caching_ctl;
6851         struct rb_node *n;
6852
6853         down_write(&info->extent_commit_sem);
6854         while (!list_empty(&info->caching_block_groups)) {
6855                 caching_ctl = list_entry(info->caching_block_groups.next,
6856                                          struct btrfs_caching_control, list);
6857                 list_del(&caching_ctl->list);
6858                 put_caching_control(caching_ctl);
6859         }
6860         up_write(&info->extent_commit_sem);
6861
6862         spin_lock(&info->block_group_cache_lock);
6863         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6864                 block_group = rb_entry(n, struct btrfs_block_group_cache,
6865                                        cache_node);
6866                 rb_erase(&block_group->cache_node,
6867                          &info->block_group_cache_tree);
6868                 spin_unlock(&info->block_group_cache_lock);
6869
6870                 down_write(&block_group->space_info->groups_sem);
6871                 list_del(&block_group->list);
6872                 up_write(&block_group->space_info->groups_sem);
6873
6874                 if (block_group->cached == BTRFS_CACHE_STARTED)
6875                         wait_block_group_cache_done(block_group);
6876
6877                 btrfs_remove_free_space_cache(block_group);
6878
6879                 WARN_ON(atomic_read(&block_group->count) != 1);
6880                 kfree(block_group);
6881
6882                 spin_lock(&info->block_group_cache_lock);
6883         }
6884         spin_unlock(&info->block_group_cache_lock);
6885
6886         /* now that all the block groups are freed, go through and
6887          * free all the space_info structs.  This is only called during
6888          * the final stages of unmount, and so we know nobody is
6889          * using them.  We call synchronize_rcu() once before we start,
6890          * just to be on the safe side.
6891          */
6892         synchronize_rcu();
6893
6894         while (!list_empty(&info->space_info)) {
6895                 space_info = list_entry(info->space_info.next,
6896                                         struct btrfs_space_info,
6897                                         list);
6898
6899                 list_del(&space_info->list);
6900                 kfree(space_info);
6901         }
6902         return 0;
6903 }
6904
6905 int btrfs_read_block_groups(struct btrfs_root *root)
6906 {
6907         struct btrfs_path *path;
6908         int ret;
6909         struct btrfs_block_group_cache *cache;
6910         struct btrfs_fs_info *info = root->fs_info;
6911         struct btrfs_space_info *space_info;
6912         struct btrfs_key key;
6913         struct btrfs_key found_key;
6914         struct extent_buffer *leaf;
6915
6916         root = info->extent_root;
6917         key.objectid = 0;
6918         key.offset = 0;
6919         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6920         path = btrfs_alloc_path();
6921         if (!path)
6922                 return -ENOMEM;
6923
6924         while (1) {
6925                 ret = find_first_block_group(root, path, &key);
6926                 if (ret > 0) {
6927                         ret = 0;
6928                         goto error;
6929                 }
6930                 if (ret != 0)
6931                         goto error;
6932
6933                 leaf = path->nodes[0];
6934                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6935                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6936                 if (!cache) {
6937                         ret = -ENOMEM;
6938                         break;
6939                 }
6940
6941                 atomic_set(&cache->count, 1);
6942                 spin_lock_init(&cache->lock);
6943                 spin_lock_init(&cache->tree_lock);
6944                 cache->fs_info = info;
6945                 INIT_LIST_HEAD(&cache->list);
6946                 INIT_LIST_HEAD(&cache->cluster_list);
6947
6948                 /*
6949                  * we only want to have 32k of ram per block group for keeping
6950                  * track of free space, and if we pass 1/2 of that we want to
6951                  * start converting things over to using bitmaps
6952                  */
6953                 cache->extents_thresh = ((1024 * 32) / 2) /
6954                         sizeof(struct btrfs_free_space);
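                     /*
                      * e.g. if sizeof(struct btrfs_free_space) were 64 bytes
                      * on a 64-bit build, this works out to 16384 / 64 = 256
                      * cached extent entries before bitmaps kick in.
                      */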
6955
6956                 read_extent_buffer(leaf, &cache->item,
6957                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
6958                                    sizeof(cache->item));
6959                 memcpy(&cache->key, &found_key, sizeof(found_key));
6960
6961                 key.objectid = found_key.objectid + found_key.offset;
6962                 btrfs_release_path(root, path);
6963                 cache->flags = btrfs_block_group_flags(&cache->item);
6964                 cache->sectorsize = root->sectorsize;
6965
6966                 /*
6967                  * check for two cases: either we are full, and therefore
6968                  * don't need to bother with the caching work since we won't
6969                  * find any space, or we are empty, and we can just add all
6970                  * the space in and be done with it.  This saves us a lot of
6971                  * time, particularly in the full case.
6972                  */
6973                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
6974                         exclude_super_stripes(root, cache);
6975                         cache->last_byte_to_unpin = (u64)-1;
6976                         cache->cached = BTRFS_CACHE_FINISHED;
6977                         free_excluded_extents(root, cache);
6978                 } else if (btrfs_block_group_used(&cache->item) == 0) {
6979                         exclude_super_stripes(root, cache);
6980                         cache->last_byte_to_unpin = (u64)-1;
6981                         cache->cached = BTRFS_CACHE_FINISHED;
6982                         add_new_free_space(cache, root->fs_info,
6983                                            found_key.objectid,
6984                                            found_key.objectid +
6985                                            found_key.offset);
6986                         free_excluded_extents(root, cache);
6987                 }
6988
6989                 ret = update_space_info(info, cache->flags, found_key.offset,
6990                                         btrfs_block_group_used(&cache->item),
6991                                         &space_info);
6992                 BUG_ON(ret);
6993                 cache->space_info = space_info;
6994                 spin_lock(&cache->space_info->lock);
6995                 cache->space_info->bytes_super += cache->bytes_super;
6996                 spin_unlock(&cache->space_info->lock);
6997
6998                 down_write(&space_info->groups_sem);
6999                 list_add_tail(&cache->list, &space_info->block_groups);
7000                 up_write(&space_info->groups_sem);
7001
7002                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7003                 BUG_ON(ret);
7004
7005                 set_avail_alloc_bits(root->fs_info, cache->flags);
7006                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7007                         set_block_group_readonly(cache);
7008         }
7009         ret = 0;
7010 error:
7011         btrfs_free_path(path);
7012         return ret;
7013 }
7014
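     /*
      * set up the in-memory cache and on-disk item for a block group that
      * covers the newly allocated chunk [chunk_offset, chunk_offset + size).
      * The free space is known exactly up front, so the group is marked
      * BTRFS_CACHE_FINISHED instead of being handed to the caching threads.
      */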
7015 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7016                            struct btrfs_root *root, u64 bytes_used,
7017                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7018                            u64 size)
7019 {
7020         int ret;
7021         struct btrfs_root *extent_root;
7022         struct btrfs_block_group_cache *cache;
7023
7024         extent_root = root->fs_info->extent_root;
7025
7026         root->fs_info->last_trans_log_full_commit = trans->transid;
7027
7028         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7029         if (!cache)
7030                 return -ENOMEM;
7031
7032         cache->key.objectid = chunk_offset;
7033         cache->key.offset = size;
7034         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7035         cache->sectorsize = root->sectorsize;
7036
7037         /*
7038          * we only want to have 32k of ram per block group for keeping track
7039          * of free space, and if we pass 1/2 of that we want to start
7040          * converting things over to using bitmaps
7041          */
7042         cache->extents_thresh = ((1024 * 32) / 2) /
7043                 sizeof(struct btrfs_free_space);
7044         atomic_set(&cache->count, 1);
7045         spin_lock_init(&cache->lock);
7046         spin_lock_init(&cache->tree_lock);
7047         INIT_LIST_HEAD(&cache->list);
7048         INIT_LIST_HEAD(&cache->cluster_list);
7049
7050         btrfs_set_block_group_used(&cache->item, bytes_used);
7051         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7052         cache->flags = type;
7053         btrfs_set_block_group_flags(&cache->item, type);
7054
7055         cache->last_byte_to_unpin = (u64)-1;
7056         cache->cached = BTRFS_CACHE_FINISHED;
7057         exclude_super_stripes(root, cache);
7058
7059         add_new_free_space(cache, root->fs_info, chunk_offset,
7060                            chunk_offset + size);
7061
7062         free_excluded_extents(root, cache);
7063
7064         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7065                                 &cache->space_info);
7066         BUG_ON(ret);
7067
7068         spin_lock(&cache->space_info->lock);
7069         cache->space_info->bytes_super += cache->bytes_super;
7070         spin_unlock(&cache->space_info->lock);
7071
7072         down_write(&cache->space_info->groups_sem);
7073         list_add_tail(&cache->list, &cache->space_info->block_groups);
7074         up_write(&cache->space_info->groups_sem);
7075
7076         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7077         BUG_ON(ret);
7078
7079         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7080                                 sizeof(cache->item));
7081         BUG_ON(ret);
7082
7083         set_avail_alloc_bits(extent_root->fs_info, type);
7084
7085         return 0;
7086 }
7087
7088 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7089                              struct btrfs_root *root, u64 group_start)
7090 {
7091         struct btrfs_path *path;
7092         struct btrfs_block_group_cache *block_group;
7093         struct btrfs_free_cluster *cluster;
7094         struct btrfs_key key;
7095         int ret;
7096
7097         root = root->fs_info->extent_root;
7098
7099         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7100         BUG_ON(!block_group);
7101         BUG_ON(!block_group->ro);
7102
7103         memcpy(&key, &block_group->key, sizeof(key));
7104
7105         /* make sure this block group isn't part of an allocation cluster */
7106         cluster = &root->fs_info->data_alloc_cluster;
7107         spin_lock(&cluster->refill_lock);
7108         btrfs_return_cluster_to_free_space(block_group, cluster);
7109         spin_unlock(&cluster->refill_lock);
7110
7111         /*
7112          * make sure this block group isn't part of a metadata
7113          * allocation cluster
7114          */
7115         cluster = &root->fs_info->meta_alloc_cluster;
7116         spin_lock(&cluster->refill_lock);
7117         btrfs_return_cluster_to_free_space(block_group, cluster);
7118         spin_unlock(&cluster->refill_lock);
7119
7120         path = btrfs_alloc_path();
7121         BUG_ON(!path);
7122
7123         spin_lock(&root->fs_info->block_group_cache_lock);
7124         rb_erase(&block_group->cache_node,
7125                  &root->fs_info->block_group_cache_tree);
7126         spin_unlock(&root->fs_info->block_group_cache_lock);
7127
7128         down_write(&block_group->space_info->groups_sem);
7129         /*
7130          * we must use list_del_init so people can check to see if they
7131          * are still on the list after taking the semaphore
7132          */
7133         list_del_init(&block_group->list);
7134         up_write(&block_group->space_info->groups_sem);
7135
7136         if (block_group->cached == BTRFS_CACHE_STARTED)
7137                 wait_block_group_cache_done(block_group);
7138
7139         btrfs_remove_free_space_cache(block_group);
7140
7141         spin_lock(&block_group->space_info->lock);
7142         block_group->space_info->total_bytes -= block_group->key.offset;
7143         block_group->space_info->bytes_readonly -= block_group->key.offset;
7144         spin_unlock(&block_group->space_info->lock);
7145
7146         btrfs_clear_space_info_full(root->fs_info);
7147
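         /*
          * two puts: one for the reference btrfs_lookup_block_group() took
          * above, and one for the reference the block group cache tree held
          * before rb_erase(), so the group is freed once both are dropped.
          */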
7148         btrfs_put_block_group(block_group);
7149         btrfs_put_block_group(block_group);
7150
7151         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7152         if (ret > 0)
7153                 ret = -EIO;
7154         if (ret < 0)
7155                 goto out;
7156
7157         ret = btrfs_del_item(trans, root, path);
7158 out:
7159         btrfs_free_path(path);
7160         return ret;
7161 }