/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};
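
/*
 * Illustrative example (editor's sketch, not part of the original
 * source): a caller that absolutely needs a fresh chunk, e.g. while
 * relocating or shrinking a device, would force the allocation:
 *
 *     ret = do_chunk_alloc(trans, extent_root, alloc_profile,
 *                          CHUNK_ALLOC_FORCE);
 *
 * while the normal ENOSPC path starts with CHUNK_ALLOC_NO_FORCE and
 * only escalates when the existing chunks are nearly full.  Here
 * "alloc_profile" stands for whatever block group type flags the
 * caller wants (data, metadata or system).
 */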

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
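
/*
 * Illustrative example (editor's sketch): the allocator reserves bytes
 * in a block group with RESERVE_ALLOC while it searches for space, and
 * hands them back with RESERVE_FREE if the allocation falls through:
 *
 *     ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *     ...
 *     if (failed)
 *             btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * RESERVE_ALLOC_NO_ACCOUNT is for callers that already charged
 * bytes_may_use themselves, so the space must not be counted twice.
 */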

static int update_block_group(struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group that starts at or after bytenr if
 * contains is 0, else it will return the block group that contains the
 * bytenr.
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
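
/*
 * Worked example (editor's addition): with 8M block groups starting at
 * 0, 8M and 16M, searching for bytenr 9M with contains=1 returns the
 * group at 8M, the one whose range covers 9M.  With contains=0 the
 * same search returns the group starting at 16M, i.e. the first group
 * whose start is at or after the bytenr.
 */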

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}
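
/*
 * Note (editor's addition): btrfs_sb_offset(i) yields the positions of
 * the superblock copies; on this vintage of btrfs the primary lives at
 * 64KiB with mirrors at 64MiB and 256GiB.  A block group whose range
 * overlaps one of those offsets gets the overlapping stripe carved out
 * of its free space, e.g. a group covering [0, 1GiB) would exclude
 * both the 64KiB and the 64MiB copies.
 */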

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * can't be used yet, since their free space will be released as soon
 * as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}
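
/*
 * Worked example (editor's addition): suppose we are caching the range
 * [0, 12M) and the pinned_extents tree holds one extent over [4M, 5M).
 * The loop adds [0, 4M) as free space and advances start past the
 * pinned extent, and the tail adds the remaining [5M, 12M).  The
 * pinned megabyte only becomes free space once the transaction commits
 * and it is unpinned.
 */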

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched()) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->leafsize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it can happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this
         * block group, but if we've moved to the state where we will wait on
         * caching block groups we need to first check if we're doing a fast
         * load here, so we can wait for it to finish, otherwise we could end
         * up allocating from a block group whose cache gets evicted for one
         * reason or another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        if (ret > 0) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == start &&
                    key.type == BTRFS_METADATA_ITEM_KEY)
                        ret = 0;
        }
        btrfs_free_path(path);
        return ret;
}

/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed, without actually running them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->leafsize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (metadata) {
                key.objectid = bytenr;
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = offset;
        } else {
                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = offset;
        }

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                metadata = 0;
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->leafsize)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = root->leafsize;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and
                         * try again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}
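
/*
 * Worked example (editor's addition): if the extent item on disk
 * records 3 references and the delayed ref head for that bytenr has
 * ref_mod == -1 (one queued drop), btrfs_lookup_extent_info() reports
 * refs == 2, the value the extent tree will show once the delayed
 * refs have run.
 */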

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used. The
 * major shortcoming of the full back refs is their overhead. Every time
 * a tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */
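
/*
 * Worked example (editor's addition): a data extent at bytenr X that is
 * referenced by a single file gets an implicit back ref item keyed
 *
 *     (X, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(root_objectid, inode_objectid, file_offset))
 *
 * whereas a tree block no longer referenced by its owner tree gets a
 * full back ref keyed (X, BTRFS_SHARED_BLOCK_REF_KEY, parent_bytenr).
 */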

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0); /* Corruption */
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret); /* Corruption */

        btrfs_extend_item(root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
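
/*
 * Note (editor's addition): the hash combines two independent crc32c
 * accumulators, one seeded with the root objectid and one covering the
 * owner and offset, into a 64-bit value.  The shift by 31 rather than
 * 32 looks odd, but it is part of the on-disk key format and must not
 * be "fixed".  A given (root, inode, offset) triple always maps to the
 * same key offset, which is how lookup_extent_data_ref() below finds
 * the item again.
 */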

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}
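
/*
 * Note (editor's addition): because different (root, owner, offset)
 * triples can hash to the same key offset, insert_extent_data_ref()
 * resolves collisions by linear probing: on -EEXIST it checks whether
 * the existing item actually matches, and if not it bumps key.offset
 * by one and retries.  lookup_extent_data_ref() mirrors this by
 * scanning forward from the hashed offset until the keys stop
 * matching.
 */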
1269
1270 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1271                                            struct btrfs_root *root,
1272                                            struct btrfs_path *path,
1273                                            int refs_to_drop)
1274 {
1275         struct btrfs_key key;
1276         struct btrfs_extent_data_ref *ref1 = NULL;
1277         struct btrfs_shared_data_ref *ref2 = NULL;
1278         struct extent_buffer *leaf;
1279         u32 num_refs = 0;
1280         int ret = 0;
1281
1282         leaf = path->nodes[0];
1283         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1284
1285         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1286                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1287                                       struct btrfs_extent_data_ref);
1288                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1289         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1290                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1291                                       struct btrfs_shared_data_ref);
1292                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1294         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1295                 struct btrfs_extent_ref_v0 *ref0;
1296                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1297                                       struct btrfs_extent_ref_v0);
1298                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1299 #endif
1300         } else {
1301                 BUG();
1302         }
1303
1304         BUG_ON(num_refs < refs_to_drop);
1305         num_refs -= refs_to_drop;
1306
1307         if (num_refs == 0) {
1308                 ret = btrfs_del_item(trans, root, path);
1309         } else {
1310                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1311                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1312                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1313                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1314 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1315                 else {
1316                         struct btrfs_extent_ref_v0 *ref0;
1317                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1318                                         struct btrfs_extent_ref_v0);
1319                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1320                 }
1321 #endif
1322                 btrfs_mark_buffer_dirty(leaf);
1323         }
1324         return ret;
1325 }
1326
1327 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1328                                           struct btrfs_path *path,
1329                                           struct btrfs_extent_inline_ref *iref)
1330 {
1331         struct btrfs_key key;
1332         struct extent_buffer *leaf;
1333         struct btrfs_extent_data_ref *ref1;
1334         struct btrfs_shared_data_ref *ref2;
1335         u32 num_refs = 0;
1336
1337         leaf = path->nodes[0];
1338         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339         if (iref) {
1340                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1341                     BTRFS_EXTENT_DATA_REF_KEY) {
1342                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1343                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1344                 } else {
1345                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1346                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347                 }
1348         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1349                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1350                                       struct btrfs_extent_data_ref);
1351                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1352         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1353                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1354                                       struct btrfs_shared_data_ref);
1355                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1358                 struct btrfs_extent_ref_v0 *ref0;
1359                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1360                                       struct btrfs_extent_ref_v0);
1361                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1362 #endif
1363         } else {
1364                 WARN_ON(1);
1365         }
1366         return num_refs;
1367 }
1368
1369 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1370                                           struct btrfs_root *root,
1371                                           struct btrfs_path *path,
1372                                           u64 bytenr, u64 parent,
1373                                           u64 root_objectid)
1374 {
1375         struct btrfs_key key;
1376         int ret;
1377
1378         key.objectid = bytenr;
1379         if (parent) {
1380                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1381                 key.offset = parent;
1382         } else {
1383                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1384                 key.offset = root_objectid;
1385         }
1386
1387         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1388         if (ret > 0)
1389                 ret = -ENOENT;
1390 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1391         if (ret == -ENOENT && parent) {
1392                 btrfs_release_path(path);
1393                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1394                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1395                 if (ret > 0)
1396                         ret = -ENOENT;
1397         }
1398 #endif
1399         return ret;
1400 }
1401
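/*
 * The two key shapes used by the tree block ref lookup above and the
 * insert below (a sketch of what actually lands in the extent tree):
 *
 *	shared back ref, keyed by the owning parent block:
 *		(bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent)
 *	full back ref, keyed by the owning tree:
 *		(bytenr, BTRFS_TREE_BLOCK_REF_KEY, root_objectid)
 *
 * Both items carry no payload (the insert passes a data size of 0); all
 * of the information lives in the key itself.
 */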
1402 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1403                                           struct btrfs_root *root,
1404                                           struct btrfs_path *path,
1405                                           u64 bytenr, u64 parent,
1406                                           u64 root_objectid)
1407 {
1408         struct btrfs_key key;
1409         int ret;
1410
1411         key.objectid = bytenr;
1412         if (parent) {
1413                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1414                 key.offset = parent;
1415         } else {
1416                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1417                 key.offset = root_objectid;
1418         }
1419
1420         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1421         btrfs_release_path(path);
1422         return ret;
1423 }
1424
1425 static inline int extent_ref_type(u64 parent, u64 owner)
1426 {
1427         int type;
1428         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1429                 if (parent > 0)
1430                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1431                 else
1432                         type = BTRFS_TREE_BLOCK_REF_KEY;
1433         } else {
1434                 if (parent > 0)
1435                         type = BTRFS_SHARED_DATA_REF_KEY;
1436                 else
1437                         type = BTRFS_EXTENT_DATA_REF_KEY;
1438         }
1439         return type;
1440 }
1441
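/*
 * A quick reference for the mapping above (a sketch of behavior, not new
 * logic): the back ref type is picked on two axes, metadata vs. data and
 * shared vs. owned.
 *
 *                      parent != 0                   parent == 0
 *   tree block   BTRFS_SHARED_BLOCK_REF_KEY   BTRFS_TREE_BLOCK_REF_KEY
 *   file data    BTRFS_SHARED_DATA_REF_KEY    BTRFS_EXTENT_DATA_REF_KEY
 *
 * e.g. extent_ref_type(0, 2) yields BTRFS_TREE_BLOCK_REF_KEY, because an
 * owner of 2 (a tree level) is below BTRFS_FIRST_FREE_OBJECTID.
 */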
1442 static int find_next_key(struct btrfs_path *path, int level,
1443                          struct btrfs_key *key)
1444
1445 {
1446         for (; level < BTRFS_MAX_LEVEL; level++) {
1447                 if (!path->nodes[level])
1448                         break;
1449                 if (path->slots[level] + 1 >=
1450                     btrfs_header_nritems(path->nodes[level]))
1451                         continue;
1452                 if (level == 0)
1453                         btrfs_item_key_to_cpu(path->nodes[level], key,
1454                                               path->slots[level] + 1);
1455                 else
1456                         btrfs_node_key_to_cpu(path->nodes[level], key,
1457                                               path->slots[level] + 1);
1458                 return 0;
1459         }
1460         return 1;
1461 }
1462
1463 /*
1464  * look for inline back ref. if back ref is found, *ref_ret is set
1465  * to the address of inline back ref, and 0 is returned.
1466  *
1467  * if back ref isn't found, *ref_ret is set to the address where it
1468  * should be inserted, and -ENOENT is returned.
1469  *
1470  * if insert is true and there are too many inline back refs, the path
1471  * points to the extent item, and -EAGAIN is returned.
1472  *
1473  * NOTE: inline back refs are ordered in the same way that back ref
1474  *       items in the tree are ordered.
1475  */
1476 static noinline_for_stack
1477 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478                                  struct btrfs_root *root,
1479                                  struct btrfs_path *path,
1480                                  struct btrfs_extent_inline_ref **ref_ret,
1481                                  u64 bytenr, u64 num_bytes,
1482                                  u64 parent, u64 root_objectid,
1483                                  u64 owner, u64 offset, int insert)
1484 {
1485         struct btrfs_key key;
1486         struct extent_buffer *leaf;
1487         struct btrfs_extent_item *ei;
1488         struct btrfs_extent_inline_ref *iref;
1489         u64 flags;
1490         u64 item_size;
1491         unsigned long ptr;
1492         unsigned long end;
1493         int extra_size;
1494         int type;
1495         int want;
1496         int ret;
1497         int err = 0;
1498         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1499                                                  SKINNY_METADATA);
1500
1501         key.objectid = bytenr;
1502         key.type = BTRFS_EXTENT_ITEM_KEY;
1503         key.offset = num_bytes;
1504
1505         want = extent_ref_type(parent, owner);
1506         if (insert) {
1507                 extra_size = btrfs_extent_inline_ref_size(want);
1508                 path->keep_locks = 1;
1509         } else
1510                 extra_size = -1;
1511
1512         /*
1513          * For tree blocks the owner is the level of the block itself,
1514          * which is exactly what a skinny metadata key stores in its offset.
1515          */
1516         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1517                 key.type = BTRFS_METADATA_ITEM_KEY;
1518                 key.offset = owner;
1519         }
1520
1521 again:
1522         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1523         if (ret < 0) {
1524                 err = ret;
1525                 goto out;
1526         }
1527
1528         /*
1529          * We may be a newly converted file system which still has the old fat
1530          * extent entries for metadata, so try and see if we have one of those.
1531          */
1532         if (ret > 0 && skinny_metadata) {
1533                 skinny_metadata = false;
1534                 if (path->slots[0]) {
1535                         path->slots[0]--;
1536                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1537                                               path->slots[0]);
1538                         if (key.objectid == bytenr &&
1539                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1540                             key.offset == num_bytes)
1541                                 ret = 0;
1542                 }
1543                 if (ret) {
1544                         key.type = BTRFS_EXTENT_ITEM_KEY;
1545                         key.offset = num_bytes;
1546                         btrfs_release_path(path);
1547                         goto again;
1548                 }
1549         }
1550
1551         if (ret && !insert) {
1552                 err = -ENOENT;
1553                 goto out;
1554         } else if (ret) {
1555                 err = -EIO;
1556                 WARN_ON(1);
1557                 goto out;
1558         }
1559
1560         leaf = path->nodes[0];
1561         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1562 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1563         if (item_size < sizeof(*ei)) {
1564                 if (!insert) {
1565                         err = -ENOENT;
1566                         goto out;
1567                 }
1568                 ret = convert_extent_item_v0(trans, root, path, owner,
1569                                              extra_size);
1570                 if (ret < 0) {
1571                         err = ret;
1572                         goto out;
1573                 }
1574                 leaf = path->nodes[0];
1575                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1576         }
1577 #endif
1578         BUG_ON(item_size < sizeof(*ei));
1579
1580         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1581         flags = btrfs_extent_flags(leaf, ei);
1582
1583         ptr = (unsigned long)(ei + 1);
1584         end = (unsigned long)ei + item_size;
1585
1586         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1587                 ptr += sizeof(struct btrfs_tree_block_info);
1588                 BUG_ON(ptr > end);
1589         }
1590
1591         err = -ENOENT;
1592         while (1) {
1593                 if (ptr >= end) {
1594                         WARN_ON(ptr > end);
1595                         break;
1596                 }
1597                 iref = (struct btrfs_extent_inline_ref *)ptr;
1598                 type = btrfs_extent_inline_ref_type(leaf, iref);
1599                 if (want < type)
1600                         break;
1601                 if (want > type) {
1602                         ptr += btrfs_extent_inline_ref_size(type);
1603                         continue;
1604                 }
1605
1606                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1607                         struct btrfs_extent_data_ref *dref;
1608                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1609                         if (match_extent_data_ref(leaf, dref, root_objectid,
1610                                                   owner, offset)) {
1611                                 err = 0;
1612                                 break;
1613                         }
1614                         if (hash_extent_data_ref_item(leaf, dref) <
1615                             hash_extent_data_ref(root_objectid, owner, offset))
1616                                 break;
1617                 } else {
1618                         u64 ref_offset;
1619                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1620                         if (parent > 0) {
1621                                 if (parent == ref_offset) {
1622                                         err = 0;
1623                                         break;
1624                                 }
1625                                 if (ref_offset < parent)
1626                                         break;
1627                         } else {
1628                                 if (root_objectid == ref_offset) {
1629                                         err = 0;
1630                                         break;
1631                                 }
1632                                 if (ref_offset < root_objectid)
1633                                         break;
1634                         }
1635                 }
1636                 ptr += btrfs_extent_inline_ref_size(type);
1637         }
1638         if (err == -ENOENT && insert) {
1639                 if (item_size + extra_size >=
1640                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1641                         err = -EAGAIN;
1642                         goto out;
1643                 }
1644                 /*
1645                  * To add new inline back ref, we have to make sure
1646                  * there is no corresponding back ref item.
1647                  * For simplicity, we just do not add new inline back
1648                  * ref if there is any kind of item for this block
1649                  */
1650                 if (find_next_key(path, 0, &key) == 0 &&
1651                     key.objectid == bytenr &&
1652                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1653                         err = -EAGAIN;
1654                         goto out;
1655                 }
1656         }
1657         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1658 out:
1659         if (insert) {
1660                 path->keep_locks = 0;
1661                 btrfs_unlock_up_safe(path, 1);
1662         }
1663         return err;
1664 }
1665
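/*
 * A minimal dispatch sketch for the contract above; the helper below is
 * hypothetical and only shows how the three return codes are meant to be
 * consumed (insert_inline_extent_backref() further down is the real
 * in-tree user of this pattern).
 */
static inline int example_handle_inline_lookup(int ret)
{
	if (ret == 0)
		return 0;	/* *ref_ret points at the existing inline ref */
	if (ret == -ENOENT)
		return 1;	/* *ref_ret is the insertion point for a new one */
	if (ret == -EAGAIN)
		return 2;	/* no room inline, fall back to a keyed ref item */
	return ret;		/* search error, propagate */
}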
1666 /*
1667  * helper to add new inline back ref
1668  */
1669 static noinline_for_stack
1670 void setup_inline_extent_backref(struct btrfs_root *root,
1671                                  struct btrfs_path *path,
1672                                  struct btrfs_extent_inline_ref *iref,
1673                                  u64 parent, u64 root_objectid,
1674                                  u64 owner, u64 offset, int refs_to_add,
1675                                  struct btrfs_delayed_extent_op *extent_op)
1676 {
1677         struct extent_buffer *leaf;
1678         struct btrfs_extent_item *ei;
1679         unsigned long ptr;
1680         unsigned long end;
1681         unsigned long item_offset;
1682         u64 refs;
1683         int size;
1684         int type;
1685
1686         leaf = path->nodes[0];
1687         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1688         item_offset = (unsigned long)iref - (unsigned long)ei;
1689
1690         type = extent_ref_type(parent, owner);
1691         size = btrfs_extent_inline_ref_size(type);
1692
1693         btrfs_extend_item(root, path, size);
1694
1695         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1696         refs = btrfs_extent_refs(leaf, ei);
1697         refs += refs_to_add;
1698         btrfs_set_extent_refs(leaf, ei, refs);
1699         if (extent_op)
1700                 __run_delayed_extent_op(extent_op, leaf, ei);
1701
1702         ptr = (unsigned long)ei + item_offset;
1703         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1704         if (ptr < end - size)
1705                 memmove_extent_buffer(leaf, ptr + size, ptr,
1706                                       end - size - ptr);
1707
1708         iref = (struct btrfs_extent_inline_ref *)ptr;
1709         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1710         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1711                 struct btrfs_extent_data_ref *dref;
1712                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1713                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1714                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1715                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1716                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1717         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1718                 struct btrfs_shared_data_ref *sref;
1719                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1720                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1721                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1722         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1723                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1724         } else {
1725                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1726         }
1727         btrfs_mark_buffer_dirty(leaf);
1728 }
1729
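/*
 * Layout sketch for the splice above, with a new inline ref of size S
 * landing at item_offset inside the extent item:
 *
 *	before extend:  [ extent_item | ref A | ref B ]
 *	                                      ^ iref / item_offset
 *	after extend:   [ extent_item | ref A | ref B | S spare bytes ]
 *	after memmove:  [ extent_item | ref A | S spare bytes | ref B ]
 *
 * The memmove is skipped when ptr == end - size, i.e. the new ref sorts
 * after every existing inline ref and the spare bytes are already in the
 * right place.  update_inline_extent_backref() below does the mirror
 * image shift when an inline ref is deleted.
 */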
1730 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1731                                  struct btrfs_root *root,
1732                                  struct btrfs_path *path,
1733                                  struct btrfs_extent_inline_ref **ref_ret,
1734                                  u64 bytenr, u64 num_bytes, u64 parent,
1735                                  u64 root_objectid, u64 owner, u64 offset)
1736 {
1737         int ret;
1738
1739         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1740                                            bytenr, num_bytes, parent,
1741                                            root_objectid, owner, offset, 0);
1742         if (ret != -ENOENT)
1743                 return ret;
1744
1745         btrfs_release_path(path);
1746         *ref_ret = NULL;
1747
1748         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1749                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1750                                             root_objectid);
1751         } else {
1752                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1753                                              root_objectid, owner, offset);
1754         }
1755         return ret;
1756 }
1757
1758 /*
1759  * helper to update/remove inline back ref
1760  */
1761 static noinline_for_stack
1762 void update_inline_extent_backref(struct btrfs_root *root,
1763                                   struct btrfs_path *path,
1764                                   struct btrfs_extent_inline_ref *iref,
1765                                   int refs_to_mod,
1766                                   struct btrfs_delayed_extent_op *extent_op)
1767 {
1768         struct extent_buffer *leaf;
1769         struct btrfs_extent_item *ei;
1770         struct btrfs_extent_data_ref *dref = NULL;
1771         struct btrfs_shared_data_ref *sref = NULL;
1772         unsigned long ptr;
1773         unsigned long end;
1774         u32 item_size;
1775         int size;
1776         int type;
1777         u64 refs;
1778
1779         leaf = path->nodes[0];
1780         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1781         refs = btrfs_extent_refs(leaf, ei);
1782         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1783         refs += refs_to_mod;
1784         btrfs_set_extent_refs(leaf, ei, refs);
1785         if (extent_op)
1786                 __run_delayed_extent_op(extent_op, leaf, ei);
1787
1788         type = btrfs_extent_inline_ref_type(leaf, iref);
1789
1790         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1791                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1792                 refs = btrfs_extent_data_ref_count(leaf, dref);
1793         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1794                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1795                 refs = btrfs_shared_data_ref_count(leaf, sref);
1796         } else {
1797                 refs = 1;
1798                 BUG_ON(refs_to_mod != -1);
1799         }
1800
1801         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1802         refs += refs_to_mod;
1803
1804         if (refs > 0) {
1805                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1806                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1807                 else
1808                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1809         } else {
1810                 size = btrfs_extent_inline_ref_size(type);
1811                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1812                 ptr = (unsigned long)iref;
1813                 end = (unsigned long)ei + item_size;
1814                 if (ptr + size < end)
1815                         memmove_extent_buffer(leaf, ptr, ptr + size,
1816                                               end - ptr - size);
1817                 item_size -= size;
1818                 btrfs_truncate_item(root, path, item_size, 1);
1819         }
1820         btrfs_mark_buffer_dirty(leaf);
1821 }
1822
1823 static noinline_for_stack
1824 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1825                                  struct btrfs_root *root,
1826                                  struct btrfs_path *path,
1827                                  u64 bytenr, u64 num_bytes, u64 parent,
1828                                  u64 root_objectid, u64 owner,
1829                                  u64 offset, int refs_to_add,
1830                                  struct btrfs_delayed_extent_op *extent_op)
1831 {
1832         struct btrfs_extent_inline_ref *iref;
1833         int ret;
1834
1835         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1836                                            bytenr, num_bytes, parent,
1837                                            root_objectid, owner, offset, 1);
1838         if (ret == 0) {
1839                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1840                 update_inline_extent_backref(root, path, iref,
1841                                              refs_to_add, extent_op);
1842         } else if (ret == -ENOENT) {
1843                 setup_inline_extent_backref(root, path, iref, parent,
1844                                             root_objectid, owner, offset,
1845                                             refs_to_add, extent_op);
1846                 ret = 0;
1847         }
1848         return ret;
1849 }
1850
1851 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1852                                  struct btrfs_root *root,
1853                                  struct btrfs_path *path,
1854                                  u64 bytenr, u64 parent, u64 root_objectid,
1855                                  u64 owner, u64 offset, int refs_to_add)
1856 {
1857         int ret;
1858         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1859                 BUG_ON(refs_to_add != 1);
1860                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1861                                             parent, root_objectid);
1862         } else {
1863                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1864                                              parent, root_objectid,
1865                                              owner, offset, refs_to_add);
1866         }
1867         return ret;
1868 }
1869
1870 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1871                                  struct btrfs_root *root,
1872                                  struct btrfs_path *path,
1873                                  struct btrfs_extent_inline_ref *iref,
1874                                  int refs_to_drop, int is_data)
1875 {
1876         int ret = 0;
1877
1878         BUG_ON(!is_data && refs_to_drop != 1);
1879         if (iref) {
1880                 update_inline_extent_backref(root, path, iref,
1881                                              -refs_to_drop, NULL);
1882         } else if (is_data) {
1883                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1884         } else {
1885                 ret = btrfs_del_item(trans, root, path);
1886         }
1887         return ret;
1888 }
1889
1890 static int btrfs_issue_discard(struct block_device *bdev,
1891                                 u64 start, u64 len)
1892 {
1893         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1894 }
1895
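/*
 * Worked example for the conversion above: blkdev_issue_discard() takes
 * 512-byte sectors, and a shift by 9 divides by 2^9 == 512.  Discarding
 * 1MiB at byte offset 4096 therefore issues sector 8 with a count of
 * 2048 sectors.
 */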
1896 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1897                                 u64 num_bytes, u64 *actual_bytes)
1898 {
1899         int ret;
1900         u64 discarded_bytes = 0;
1901         struct btrfs_bio *bbio = NULL;
1902
1903
1904         /* Tell the block device(s) that the sectors can be discarded */
1905         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1906                               bytenr, &num_bytes, &bbio, 0);
1907         /* Error condition is -ENOMEM */
1908         if (!ret) {
1909                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1910                 int i;
1911
1912
1913                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1914                         if (!stripe->dev->can_discard)
1915                                 continue;
1916
1917                         ret = btrfs_issue_discard(stripe->dev->bdev,
1918                                                   stripe->physical,
1919                                                   stripe->length);
1920                         if (!ret)
1921                                 discarded_bytes += stripe->length;
1922                         else if (ret != -EOPNOTSUPP)
1923                                 break; /* Logic errors or -ENOMEM, or -EIO, though it is unclear how -EIO could happen here */
1924
1925                         /*
1926                          * In case we get back EOPNOTSUPP for some reason,
1927                          * ignore the return value so we don't break the
1928                          * callers of discard_extent.
1929                          */
1930                         ret = 0;
1931                 }
1932                 kfree(bbio);
1933         }
1934
1935         if (actual_bytes)
1936                 *actual_bytes = discarded_bytes;
1937
1938
1939         if (ret == -EOPNOTSUPP)
1940                 ret = 0;
1941         return ret;
1942 }
1943
1944 /* Can return -ENOMEM */
1945 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1946                          struct btrfs_root *root,
1947                          u64 bytenr, u64 num_bytes, u64 parent,
1948                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1949 {
1950         int ret;
1951         struct btrfs_fs_info *fs_info = root->fs_info;
1952
1953         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1954                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1955
1956         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1957                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1958                                         num_bytes,
1959                                         parent, root_objectid, (int)owner,
1960                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1961         } else {
1962                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1963                                         num_bytes,
1964                                         parent, root_objectid, owner, offset,
1965                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1966         }
1967         return ret;
1968 }
1969
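/*
 * Parameter sketch for the wrapper above (all values illustrative): to
 * bump the ref on file data owned by fs tree 257, inode 261, file offset
 * 0, with no shared parent, a caller would queue
 *
 *	btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
 *			     257, 261, 0, 0);
 *
 * and take the btrfs_add_delayed_data_ref() branch.  A tree block passes
 * its level as owner (below BTRFS_FIRST_FREE_OBJECTID) and is queued via
 * btrfs_add_delayed_tree_ref() instead.
 */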
1970 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1971                                   struct btrfs_root *root,
1972                                   u64 bytenr, u64 num_bytes,
1973                                   u64 parent, u64 root_objectid,
1974                                   u64 owner, u64 offset, int refs_to_add,
1975                                   struct btrfs_delayed_extent_op *extent_op)
1976 {
1977         struct btrfs_path *path;
1978         struct extent_buffer *leaf;
1979         struct btrfs_extent_item *item;
1980         u64 refs;
1981         int ret;
1982         int err = 0;
1983
1984         path = btrfs_alloc_path();
1985         if (!path)
1986                 return -ENOMEM;
1987
1988         path->reada = 1;
1989         path->leave_spinning = 1;
1990         /* this will set up the path even if it fails to insert the back ref */
1991         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1992                                            path, bytenr, num_bytes, parent,
1993                                            root_objectid, owner, offset,
1994                                            refs_to_add, extent_op);
1995         if (ret == 0)
1996                 goto out;
1997
1998         if (ret != -EAGAIN) {
1999                 err = ret;
2000                 goto out;
2001         }
2002
2003         leaf = path->nodes[0];
2004         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2005         refs = btrfs_extent_refs(leaf, item);
2006         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2007         if (extent_op)
2008                 __run_delayed_extent_op(extent_op, leaf, item);
2009
2010         btrfs_mark_buffer_dirty(leaf);
2011         btrfs_release_path(path);
2012
2013         path->reada = 1;
2014         path->leave_spinning = 1;
2015
2016         /* now insert the actual backref */
2017         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2018                                     path, bytenr, parent, root_objectid,
2019                                     owner, offset, refs_to_add);
2020         if (ret)
2021                 btrfs_abort_transaction(trans, root, ret);
2022 out:
2023         btrfs_free_path(path);
2024         return err;
2025 }
2026
2027 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2028                                 struct btrfs_root *root,
2029                                 struct btrfs_delayed_ref_node *node,
2030                                 struct btrfs_delayed_extent_op *extent_op,
2031                                 int insert_reserved)
2032 {
2033         int ret = 0;
2034         struct btrfs_delayed_data_ref *ref;
2035         struct btrfs_key ins;
2036         u64 parent = 0;
2037         u64 ref_root = 0;
2038         u64 flags = 0;
2039
2040         ins.objectid = node->bytenr;
2041         ins.offset = node->num_bytes;
2042         ins.type = BTRFS_EXTENT_ITEM_KEY;
2043
2044         ref = btrfs_delayed_node_to_data_ref(node);
2045         trace_run_delayed_data_ref(node, ref, node->action);
2046
2047         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2048                 parent = ref->parent;
2049         else
2050                 ref_root = ref->root;
2051
2052         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2053                 if (extent_op)
2054                         flags |= extent_op->flags_to_set;
2055                 ret = alloc_reserved_file_extent(trans, root,
2056                                                  parent, ref_root, flags,
2057                                                  ref->objectid, ref->offset,
2058                                                  &ins, node->ref_mod);
2059         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2060                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2061                                              node->num_bytes, parent,
2062                                              ref_root, ref->objectid,
2063                                              ref->offset, node->ref_mod,
2064                                              extent_op);
2065         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2066                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2067                                           node->num_bytes, parent,
2068                                           ref_root, ref->objectid,
2069                                           ref->offset, node->ref_mod,
2070                                           extent_op);
2071         } else {
2072                 BUG();
2073         }
2074         return ret;
2075 }
2076
2077 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2078                                     struct extent_buffer *leaf,
2079                                     struct btrfs_extent_item *ei)
2080 {
2081         u64 flags = btrfs_extent_flags(leaf, ei);
2082         if (extent_op->update_flags) {
2083                 flags |= extent_op->flags_to_set;
2084                 btrfs_set_extent_flags(leaf, ei, flags);
2085         }
2086
2087         if (extent_op->update_key) {
2088                 struct btrfs_tree_block_info *bi;
2089                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2090                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2091                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2092         }
2093 }
2094
2095 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2096                                  struct btrfs_root *root,
2097                                  struct btrfs_delayed_ref_node *node,
2098                                  struct btrfs_delayed_extent_op *extent_op)
2099 {
2100         struct btrfs_key key;
2101         struct btrfs_path *path;
2102         struct btrfs_extent_item *ei;
2103         struct extent_buffer *leaf;
2104         u32 item_size;
2105         int ret;
2106         int err = 0;
2107         int metadata = !extent_op->is_data;
2108
2109         if (trans->aborted)
2110                 return 0;
2111
2112         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2113                 metadata = 0;
2114
2115         path = btrfs_alloc_path();
2116         if (!path)
2117                 return -ENOMEM;
2118
2119         key.objectid = node->bytenr;
2120
2121         if (metadata) {
2122                 key.type = BTRFS_METADATA_ITEM_KEY;
2123                 key.offset = extent_op->level;
2124         } else {
2125                 key.type = BTRFS_EXTENT_ITEM_KEY;
2126                 key.offset = node->num_bytes;
2127         }
2128
2129 again:
2130         path->reada = 1;
2131         path->leave_spinning = 1;
2132         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2133                                 path, 0, 1);
2134         if (ret < 0) {
2135                 err = ret;
2136                 goto out;
2137         }
2138         if (ret > 0) {
2139                 if (metadata) {
2140                         btrfs_release_path(path);
2141                         metadata = 0;
2142
2143                         key.offset = node->num_bytes;
2144                         key.type = BTRFS_EXTENT_ITEM_KEY;
2145                         goto again;
2146                 }
2147                 err = -EIO;
2148                 goto out;
2149         }
2150
2151         leaf = path->nodes[0];
2152         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2153 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2154         if (item_size < sizeof(*ei)) {
2155                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2156                                              path, (u64)-1, 0);
2157                 if (ret < 0) {
2158                         err = ret;
2159                         goto out;
2160                 }
2161                 leaf = path->nodes[0];
2162                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2163         }
2164 #endif
2165         BUG_ON(item_size < sizeof(*ei));
2166         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2167         __run_delayed_extent_op(extent_op, leaf, ei);
2168
2169         btrfs_mark_buffer_dirty(leaf);
2170 out:
2171         btrfs_free_path(path);
2172         return err;
2173 }
2174
2175 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2176                                 struct btrfs_root *root,
2177                                 struct btrfs_delayed_ref_node *node,
2178                                 struct btrfs_delayed_extent_op *extent_op,
2179                                 int insert_reserved)
2180 {
2181         int ret = 0;
2182         struct btrfs_delayed_tree_ref *ref;
2183         struct btrfs_key ins;
2184         u64 parent = 0;
2185         u64 ref_root = 0;
2186         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2187                                                  SKINNY_METADATA);
2188
2189         ref = btrfs_delayed_node_to_tree_ref(node);
2190         trace_run_delayed_tree_ref(node, ref, node->action);
2191
2192         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2193                 parent = ref->parent;
2194         else
2195                 ref_root = ref->root;
2196
2197         ins.objectid = node->bytenr;
2198         if (skinny_metadata) {
2199                 ins.offset = ref->level;
2200                 ins.type = BTRFS_METADATA_ITEM_KEY;
2201         } else {
2202                 ins.offset = node->num_bytes;
2203                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2204         }
2205
2206         BUG_ON(node->ref_mod != 1);
2207         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2208                 BUG_ON(!extent_op || !extent_op->update_flags);
2209                 ret = alloc_reserved_tree_block(trans, root,
2210                                                 parent, ref_root,
2211                                                 extent_op->flags_to_set,
2212                                                 &extent_op->key,
2213                                                 ref->level, &ins);
2214         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2215                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2216                                              node->num_bytes, parent, ref_root,
2217                                              ref->level, 0, 1, extent_op);
2218         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2219                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2220                                           node->num_bytes, parent, ref_root,
2221                                           ref->level, 0, 1, extent_op);
2222         } else {
2223                 BUG();
2224         }
2225         return ret;
2226 }
2227
2228 /* helper function to actually process a single delayed ref entry */
2229 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2230                                struct btrfs_root *root,
2231                                struct btrfs_delayed_ref_node *node,
2232                                struct btrfs_delayed_extent_op *extent_op,
2233                                int insert_reserved)
2234 {
2235         int ret = 0;
2236
2237         if (trans->aborted)
2238                 return 0;
2239
2240         if (btrfs_delayed_ref_is_head(node)) {
2241                 struct btrfs_delayed_ref_head *head;
2242                 /*
2243                  * we've hit the end of the chain and we were supposed
2244                  * to insert this extent into the tree.  But it got
2245                  * deleted before we ever needed to insert it, so all
2246                  * we have to do is clean up the accounting
2247                  */
2248                 BUG_ON(extent_op);
2249                 head = btrfs_delayed_node_to_head(node);
2250                 trace_run_delayed_ref_head(node, head, node->action);
2251
2252                 if (insert_reserved) {
2253                         btrfs_pin_extent(root, node->bytenr,
2254                                          node->num_bytes, 1);
2255                         if (head->is_data) {
2256                                 ret = btrfs_del_csums(trans, root,
2257                                                       node->bytenr,
2258                                                       node->num_bytes);
2259                         }
2260                 }
2261                 return ret;
2262         }
2263
2264         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2265             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2266                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2267                                            insert_reserved);
2268         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2269                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2270                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2271                                            insert_reserved);
2272         else
2273                 BUG();
2274         return ret;
2275 }
2276
2277 static noinline struct btrfs_delayed_ref_node *
2278 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2279 {
2280         struct rb_node *node;
2281         struct btrfs_delayed_ref_node *ref;
2282         int action = BTRFS_ADD_DELAYED_REF;
2283 again:
2284         /*
2285          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2286          * This prevents the ref count from going down to zero while
2287          * there are still pending delayed refs.
2288          */
2289         node = rb_prev(&head->node.rb_node);
2290         while (1) {
2291                 if (!node)
2292                         break;
2293                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2294                                 rb_node);
2295                 if (ref->bytenr != head->node.bytenr)
2296                         break;
2297                 if (ref->action == action)
2298                         return ref;
2299                 node = rb_prev(node);
2300         }
2301         if (action == BTRFS_ADD_DELAYED_REF) {
2302                 action = BTRFS_DROP_DELAYED_REF;
2303                 goto again;
2304         }
2305         return NULL;
2306 }
2307
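/*
 * Worked example for the ADD-before-DROP policy above: with one existing
 * reference and a pending DROP(-1) followed by a pending ADD(+1), running
 * the DROP first would momentarily take the count to zero and free the
 * extent out from under the queued ADD.  Picking the ADD first walks the
 * count 1 -> 2 -> 1 instead of 1 -> 0 -> use-after-free.
 */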
2308 /*
2309  * Returns the number of refs processed on success.  Returns -ENOMEM or
2310  * -EIO on failure, in which case the caller aborts the transaction.
2311  */
2312 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2313                                        struct btrfs_root *root,
2314                                        struct list_head *cluster)
2315 {
2316         struct btrfs_delayed_ref_root *delayed_refs;
2317         struct btrfs_delayed_ref_node *ref;
2318         struct btrfs_delayed_ref_head *locked_ref = NULL;
2319         struct btrfs_delayed_extent_op *extent_op;
2320         struct btrfs_fs_info *fs_info = root->fs_info;
2321         int ret;
2322         int count = 0;
2323         int must_insert_reserved = 0;
2324
2325         delayed_refs = &trans->transaction->delayed_refs;
2326         while (1) {
2327                 if (!locked_ref) {
2328                         /* pick a new head ref from the cluster list */
2329                         if (list_empty(cluster))
2330                                 break;
2331
2332                         locked_ref = list_entry(cluster->next,
2333                                      struct btrfs_delayed_ref_head, cluster);
2334
2335                         /* grab the lock that says we are going to process
2336                          * all the refs for this head */
2337                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2338
2339                         /*
2340                          * we may have dropped the spin lock to get the head
2341                          * mutex lock, and that might have given someone else
2342                          * time to free the head.  If that's true, it has been
2343                          * removed from our list and we can move on.
2344                          */
2345                         if (ret == -EAGAIN) {
2346                                 locked_ref = NULL;
2347                                 count++;
2348                                 continue;
2349                         }
2350                 }
2351
2352                 /*
2353                  * We need to try and merge add/drops of the same ref since we
2354                  * can run into issues with relocate dropping the implicit ref
2355                  * and then it being added back again before the drop can
2356                  * finish.  If we merged anything we need to re-loop so we can
2357                  * get a good ref.
2358                  */
2359                 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2360                                          locked_ref);
2361
2362                 /*
2363                  * locked_ref is the head node, so we have to go one
2364                  * node back for any delayed ref updates
2365                  */
2366                 ref = select_delayed_ref(locked_ref);
2367
2368                 if (ref && ref->seq &&
2369                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2370                         /*
2371                          * there are still refs with lower seq numbers in the
2372                          * process of being added. Don't run this ref yet.
2373                          */
2374                         list_del_init(&locked_ref->cluster);
2375                         btrfs_delayed_ref_unlock(locked_ref);
2376                         locked_ref = NULL;
2377                         delayed_refs->num_heads_ready++;
2378                         spin_unlock(&delayed_refs->lock);
2379                         cond_resched();
2380                         spin_lock(&delayed_refs->lock);
2381                         continue;
2382                 }
2383
2384                 /*
2385                  * record the must insert reserved flag before we
2386                  * drop the spin lock.
2387                  */
2388                 must_insert_reserved = locked_ref->must_insert_reserved;
2389                 locked_ref->must_insert_reserved = 0;
2390
2391                 extent_op = locked_ref->extent_op;
2392                 locked_ref->extent_op = NULL;
2393
2394                 if (!ref) {
2395                         /* All delayed refs have been processed, go ahead
2396                          * and send the head node to run_one_delayed_ref,
2397                          * so that any accounting fixes can happen
2398                          */
2399                         ref = &locked_ref->node;
2400
2401                         if (extent_op && must_insert_reserved) {
2402                                 btrfs_free_delayed_extent_op(extent_op);
2403                                 extent_op = NULL;
2404                         }
2405
2406                         if (extent_op) {
2407                                 spin_unlock(&delayed_refs->lock);
2408
2409                                 ret = run_delayed_extent_op(trans, root,
2410                                                             ref, extent_op);
2411                                 btrfs_free_delayed_extent_op(extent_op);
2412
2413                                 if (ret) {
2414                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2415                                         spin_lock(&delayed_refs->lock);
2416                                         btrfs_delayed_ref_unlock(locked_ref);
2417                                         return ret;
2418                                 }
2419
2420                                 goto next;
2421                         }
2422                 }
2423
2424                 ref->in_tree = 0;
2425                 rb_erase(&ref->rb_node, &delayed_refs->root);
2426                 delayed_refs->num_entries--;
2427                 if (!btrfs_delayed_ref_is_head(ref)) {
2428                         /*
2429                          * when we play the delayed ref, also correct the
2430                          * ref_mod on head
2431                          */
2432                         switch (ref->action) {
2433                         case BTRFS_ADD_DELAYED_REF:
2434                         case BTRFS_ADD_DELAYED_EXTENT:
2435                                 locked_ref->node.ref_mod -= ref->ref_mod;
2436                                 break;
2437                         case BTRFS_DROP_DELAYED_REF:
2438                                 locked_ref->node.ref_mod += ref->ref_mod;
2439                                 break;
2440                         default:
2441                                 WARN_ON(1);
2442                         }
2443                 } else {
2444                         list_del_init(&locked_ref->cluster);
2445                 }
2446                 spin_unlock(&delayed_refs->lock);
2447
2448                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2449                                           must_insert_reserved);
2450
2451                 btrfs_free_delayed_extent_op(extent_op);
2452                 if (ret) {
2453                         btrfs_delayed_ref_unlock(locked_ref);
2454                         btrfs_put_delayed_ref(ref);
2455                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2456                         spin_lock(&delayed_refs->lock);
2457                         return ret;
2458                 }
2459
2460                 /*
2461                  * If this node is a head, that means all the refs in this head
2462                  * have been dealt with, and we will pick the next head to deal
2463                  * with, so we must unlock the head and drop it from the cluster
2464                  * list before we release it.
2465                  */
2466                 if (btrfs_delayed_ref_is_head(ref)) {
2467                         btrfs_delayed_ref_unlock(locked_ref);
2468                         locked_ref = NULL;
2469                 }
2470                 btrfs_put_delayed_ref(ref);
2471                 count++;
2472 next:
2473                 cond_resched();
2474                 spin_lock(&delayed_refs->lock);
2475         }
2476         return count;
2477 }
2478
2479 #ifdef SCRAMBLE_DELAYED_REFS
2480 /*
2481  * Normally delayed refs get processed in ascending bytenr order. This
2482  * correlates in most cases to the order added. To expose dependencies on this
2483  * order, we start to process the tree in the middle instead of the beginning
2484  */
2485 static u64 find_middle(struct rb_root *root)
2486 {
2487         struct rb_node *n = root->rb_node;
2488         struct btrfs_delayed_ref_node *entry;
2489         int alt = 1;
2490         u64 middle;
2491         u64 first = 0, last = 0;
2492
2493         n = rb_first(root);
2494         if (n) {
2495                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2496                 first = entry->bytenr;
2497         }
2498         n = rb_last(root);
2499         if (n) {
2500                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2501                 last = entry->bytenr;
2502         }
2503         n = root->rb_node;
2504
2505         while (n) {
2506                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2507                 WARN_ON(!entry->in_tree);
2508
2509                 middle = entry->bytenr;
2510
2511                 if (alt)
2512                         n = n->rb_left;
2513                 else
2514                         n = n->rb_right;
2515
2516                 alt = 1 - alt;
2517         }
2518         return middle;
2519 }
2520 #endif
2521
2522 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
2523                                          struct btrfs_fs_info *fs_info)
2524 {
2525         struct qgroup_update *qgroup_update;
2526         int ret = 0;
2527
2528         if (list_empty(&trans->qgroup_ref_list) !=
2529             !trans->delayed_ref_elem.seq) {
2530                 /* list without seq or seq without list */
2531                 btrfs_err(fs_info,
2532                         "qgroup accounting update error, list is%s empty, seq is %#x.%x",
2533                         list_empty(&trans->qgroup_ref_list) ? "" : " not",
2534                         (u32)(trans->delayed_ref_elem.seq >> 32),
2535                         (u32)trans->delayed_ref_elem.seq);
2536                 BUG();
2537         }
2538
2539         if (!trans->delayed_ref_elem.seq)
2540                 return 0;
2541
2542         while (!list_empty(&trans->qgroup_ref_list)) {
2543                 qgroup_update = list_first_entry(&trans->qgroup_ref_list,
2544                                                  struct qgroup_update, list);
2545                 list_del(&qgroup_update->list);
2546                 if (!ret)
2547                         ret = btrfs_qgroup_account_ref(
2548                                         trans, fs_info, qgroup_update->node,
2549                                         qgroup_update->extent_op);
2550                 kfree(qgroup_update);
2551         }
2552
2553         btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
2554
2555         return ret;
2556 }
2557
2558 static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
2559                       int count)
2560 {
2561         int val = atomic_read(&delayed_refs->ref_seq);
2562
2563         if (val < seq || val >= seq + count)
2564                 return 1;
2565         return 0;
2566 }
2567
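/*
 * Example for the check above: with seq sampled at 100 and count 256,
 * refs_newer() reports progress once ref_seq reaches 356 (at least count
 * refs have run since the sample), or if ref_seq dropped below 100 (the
 * counter was reset or wrapped).
 */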
2568 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2569 {
2570         u64 num_bytes;
2571
2572         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2573                              sizeof(struct btrfs_extent_inline_ref));
2574         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2575                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2576
2577         /*
2578          * We don't ever fill up leaves all the way, so the caller doubles
2579          * this estimate to get closer to what we're really going to use.
2580          */
2581         return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2582 }
2583
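/*
 * Rough arithmetic for the estimate above (on-disk sizes, illustrative):
 * an extent item is 24 bytes and an inline ref about 9, so a
 * skinny-metadata head costs ~33 bytes of leaf data, or ~51 with the
 * extra btrfs_tree_block_info.  With roughly 16k of leaf data space that
 * is a few hundred heads per leaf before any fudge factor.
 */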
2584 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2585                                        struct btrfs_root *root)
2586 {
2587         struct btrfs_block_rsv *global_rsv;
2588         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2589         u64 num_bytes;
2590         int ret = 0;
2591
2592         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2593         num_heads = heads_to_leaves(root, num_heads);
2594         if (num_heads > 1)
2595                 num_bytes += (num_heads - 1) * root->leafsize;
2596         num_bytes <<= 1;
2597         global_rsv = &root->fs_info->global_block_rsv;
2598
2599         /*
2600          * If we can't allocate any more chunks lets make sure we have _lots_ of
2601          * wiggle room since running delayed refs can create more delayed refs.
2602          */
2603         if (global_rsv->space_info->full)
2604                 num_bytes <<= 1;
2605
2606         spin_lock(&global_rsv->lock);
2607         if (global_rsv->reserved <= num_bytes)
2608                 ret = 1;
2609         spin_unlock(&global_rsv->lock);
2610         return ret;
2611 }
2612
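/*
 * Illustrative numbers for the threshold above, assuming 16k leaves and
 * 1000 ready skinny-metadata heads: heads_to_leaves() yields ~2 leaves,
 * so num_bytes is one metadata reservation unit plus one extra leaf,
 * doubled (and doubled again on a full FS).  Throttling kicks in once
 * the global reserve can no longer cover that worst-case growth.
 */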
2613 /*
2614  * this starts processing the delayed reference count updates and
2615  * extent insertions we have queued up so far.  count can be
2616  * 0, which means to process everything in the tree at the start
2617  * of the run (but not newly added entries), or it can be some target
2618  * number you'd like to process.
2619  *
2620  * Returns 0 on success or if called with an aborted transaction
2621  * Returns <0 on error and aborts the transaction
2622  */
2623 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2624                            struct btrfs_root *root, unsigned long count)
2625 {
2626         struct rb_node *node;
2627         struct btrfs_delayed_ref_root *delayed_refs;
2628         struct btrfs_delayed_ref_node *ref;
2629         struct list_head cluster;
2630         int ret;
2631         u64 delayed_start;
2632         int run_all = count == (unsigned long)-1;
2633         int run_most = 0;
2634         int loops;
2635
2636         /* We'll clean this up in btrfs_cleanup_transaction */
2637         if (trans->aborted)
2638                 return 0;
2639
2640         if (root == root->fs_info->extent_root)
2641                 root = root->fs_info->tree_root;
2642
2643         btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
2644
2645         delayed_refs = &trans->transaction->delayed_refs;
2646         INIT_LIST_HEAD(&cluster);
2647         if (count == 0) {
2648                 count = delayed_refs->num_entries * 2;
2649                 run_most = 1;
2650         }
2651
2652         if (!run_all && !run_most) {
2653                 int old;
2654                 int seq = atomic_read(&delayed_refs->ref_seq);
2655
2656 progress:
2657                 old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2658                 if (old) {
2659                         DEFINE_WAIT(__wait);
2660                         if (delayed_refs->flushing ||
2661                             !btrfs_should_throttle_delayed_refs(trans, root))
2662                                 return 0;
2663
2664                         prepare_to_wait(&delayed_refs->wait, &__wait,
2665                                         TASK_UNINTERRUPTIBLE);
2666
2667                         old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
2668                         if (old) {
2669                                 schedule();
2670                                 finish_wait(&delayed_refs->wait, &__wait);
2671
2672                                 if (!refs_newer(delayed_refs, seq, 256))
2673                                         goto progress;
2674                                 else
2675                                         return 0;
2676                         } else {
2677                                 finish_wait(&delayed_refs->wait, &__wait);
2678                                 goto again;
2679                         }
2680                 }
2681
2682         } else {
2683                 atomic_inc(&delayed_refs->procs_running_refs);
2684         }
2685
2686 again:
2687         loops = 0;
2688         spin_lock(&delayed_refs->lock);
2689
2690 #ifdef SCRAMBLE_DELAYED_REFS
2691         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2692 #endif
2693
2694         while (1) {
2695                 if (!(run_all || run_most) &&
2696                     !btrfs_should_throttle_delayed_refs(trans, root))
2697                         break;
2698
2699                 /*
2700                  * go find something we can process in the rbtree.  We start at
2701                  * the beginning of the tree, and then build a cluster
2702                  * of refs to process starting at the first one we are able to
2703                  * lock.
2704                  */
2705                 delayed_start = delayed_refs->run_delayed_start;
2706                 ret = btrfs_find_ref_cluster(trans, &cluster,
2707                                              delayed_refs->run_delayed_start);
2708                 if (ret)
2709                         break;
2710
2711                 ret = run_clustered_refs(trans, root, &cluster);
2712                 if (ret < 0) {
2713                         btrfs_release_ref_cluster(&cluster);
2714                         spin_unlock(&delayed_refs->lock);
2715                         btrfs_abort_transaction(trans, root, ret);
2716                         atomic_dec(&delayed_refs->procs_running_refs);
2717                         wake_up(&delayed_refs->wait);
2718                         return ret;
2719                 }
2720
2721                 atomic_add(ret, &delayed_refs->ref_seq);
2722
2723                 count -= min_t(unsigned long, ret, count);
2724
2725                 if (count == 0)
2726                         break;
2727
2728                 if (delayed_start >= delayed_refs->run_delayed_start) {
2729                         if (loops == 0) {
2730                                 /*
2731                                  * btrfs_find_ref_cluster looped. Let's do one
2732                                  * more cycle. If we don't run any delayed
2733                                  * refs during that cycle (because all of
2734                                  * them are blocked), bail out.
2735                                  */
2736                                 loops = 1;
2737                         } else {
2738                                 /*
2739                                  * no runnable refs left, stop trying
2740                                  */
2741                                 BUG_ON(run_all);
2742                                 break;
2743                         }
2744                 }
2745                 if (ret) {
2746                         /* refs were run, let's reset staleness detection */
2747                         loops = 0;
2748                 }
2749         }
2750
2751         if (run_all) {
2752                 if (!list_empty(&trans->new_bgs)) {
2753                         spin_unlock(&delayed_refs->lock);
2754                         btrfs_create_pending_block_groups(trans, root);
2755                         spin_lock(&delayed_refs->lock);
2756                 }
2757
2758                 node = rb_first(&delayed_refs->root);
2759                 if (!node)
2760                         goto out;
2761                 count = (unsigned long)-1;
2762
2763                 while (node) {
2764                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2765                                        rb_node);
2766                         if (btrfs_delayed_ref_is_head(ref)) {
2767                                 struct btrfs_delayed_ref_head *head;
2768
2769                                 head = btrfs_delayed_node_to_head(ref);
2770                                 atomic_inc(&ref->refs);
2771
2772                                 spin_unlock(&delayed_refs->lock);
2773                                 /*
2774                                  * Mutex was contended, block until it's
2775                                  * released and try again
2776                                  */
2777                                 mutex_lock(&head->mutex);
2778                                 mutex_unlock(&head->mutex);
2779
2780                                 btrfs_put_delayed_ref(ref);
2781                                 cond_resched();
2782                                 goto again;
2783                         }
2784                         node = rb_next(node);
2785                 }
2786                 spin_unlock(&delayed_refs->lock);
2787                 schedule_timeout(1);
2788                 goto again;
2789         }
2790 out:
2791         atomic_dec(&delayed_refs->procs_running_refs);
2792         smp_mb();
2793         if (waitqueue_active(&delayed_refs->wait))
2794                 wake_up(&delayed_refs->wait);
2795
2796         spin_unlock(&delayed_refs->lock);
2797         assert_qgroups_uptodate(trans);
2798         return 0;
2799 }
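The count argument supports three calling patterns; a short recap of the call shapes (the batch size in the last call is an arbitrary assumed value):

    /* run everything, including refs added while we run (run_all) */
    ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);

    /* run roughly everything queued at the start of the run (run_most) */
    ret = btrfs_run_delayed_refs(trans, root, 0);

    /* run a bounded batch as a throttling step (size assumed) */
    ret = btrfs_run_delayed_refs(trans, root, 64);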
2800
2801 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2802                                 struct btrfs_root *root,
2803                                 u64 bytenr, u64 num_bytes, u64 flags,
2804                                 int level, int is_data)
2805 {
2806         struct btrfs_delayed_extent_op *extent_op;
2807         int ret;
2808
2809         extent_op = btrfs_alloc_delayed_extent_op();
2810         if (!extent_op)
2811                 return -ENOMEM;
2812
2813         extent_op->flags_to_set = flags;
2814         extent_op->update_flags = 1;
2815         extent_op->update_key = 0;
2816         extent_op->is_data = is_data ? 1 : 0;
2817         extent_op->level = level;
2818
2819         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2820                                           num_bytes, extent_op);
2821         if (ret)
2822                 btrfs_free_delayed_extent_op(extent_op);
2823         return ret;
2824 }
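As a usage sketch, this is roughly how the COW path flips a tree block to full-backref mode without rewriting the extent item synchronously; buf is assumed caller context:

    ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
                                      BTRFS_BLOCK_FLAG_FULL_BACKREF,
                                      btrfs_header_level(buf), 0);
    if (ret)
        btrfs_abort_transaction(trans, root, ret);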
2825
2826 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2827                                       struct btrfs_root *root,
2828                                       struct btrfs_path *path,
2829                                       u64 objectid, u64 offset, u64 bytenr)
2830 {
2831         struct btrfs_delayed_ref_head *head;
2832         struct btrfs_delayed_ref_node *ref;
2833         struct btrfs_delayed_data_ref *data_ref;
2834         struct btrfs_delayed_ref_root *delayed_refs;
2835         struct rb_node *node;
2836         int ret = 0;
2837
2838         ret = -ENOENT;
2839         delayed_refs = &trans->transaction->delayed_refs;
2840         spin_lock(&delayed_refs->lock);
2841         head = btrfs_find_delayed_ref_head(trans, bytenr);
2842         if (!head)
2843                 goto out;
2844
2845         if (!mutex_trylock(&head->mutex)) {
2846                 atomic_inc(&head->node.refs);
2847                 spin_unlock(&delayed_refs->lock);
2848
2849                 btrfs_release_path(path);
2850
2851                 /*
2852                  * Mutex was contended, block until it's released and let
2853                  * caller try again
2854                  */
2855                 mutex_lock(&head->mutex);
2856                 mutex_unlock(&head->mutex);
2857                 btrfs_put_delayed_ref(&head->node);
2858                 return -EAGAIN;
2859         }
2860
2861         node = rb_prev(&head->node.rb_node);
2862         if (!node)
2863                 goto out_unlock;
2864
2865         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2866
2867         if (ref->bytenr != bytenr)
2868                 goto out_unlock;
2869
2870         ret = 1;
2871         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2872                 goto out_unlock;
2873
2874         data_ref = btrfs_delayed_node_to_data_ref(ref);
2875
2876         node = rb_prev(node);
2877         if (node) {
2878                 int seq = ref->seq;
2879
2880                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2881                 if (ref->bytenr == bytenr && ref->seq == seq)
2882                         goto out_unlock;
2883         }
2884
2885         if (data_ref->root != root->root_key.objectid ||
2886             data_ref->objectid != objectid || data_ref->offset != offset)
2887                 goto out_unlock;
2888
2889         ret = 0;
2890 out_unlock:
2891         mutex_unlock(&head->mutex);
2892 out:
2893         spin_unlock(&delayed_refs->lock);
2894         return ret;
2895 }
2896
2897 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2898                                         struct btrfs_root *root,
2899                                         struct btrfs_path *path,
2900                                         u64 objectid, u64 offset, u64 bytenr)
2901 {
2902         struct btrfs_root *extent_root = root->fs_info->extent_root;
2903         struct extent_buffer *leaf;
2904         struct btrfs_extent_data_ref *ref;
2905         struct btrfs_extent_inline_ref *iref;
2906         struct btrfs_extent_item *ei;
2907         struct btrfs_key key;
2908         u32 item_size;
2909         int ret;
2910
2911         key.objectid = bytenr;
2912         key.offset = (u64)-1;
2913         key.type = BTRFS_EXTENT_ITEM_KEY;
2914
2915         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2916         if (ret < 0)
2917                 goto out;
2918         BUG_ON(ret == 0); /* Corruption */
2919
2920         ret = -ENOENT;
2921         if (path->slots[0] == 0)
2922                 goto out;
2923
2924         path->slots[0]--;
2925         leaf = path->nodes[0];
2926         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2927
2928         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2929                 goto out;
2930
2931         ret = 1;
2932         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2933 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2934         if (item_size < sizeof(*ei)) {
2935                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2936                 goto out;
2937         }
2938 #endif
2939         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2940
2941         if (item_size != sizeof(*ei) +
2942             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2943                 goto out;
2944
2945         if (btrfs_extent_generation(leaf, ei) <=
2946             btrfs_root_last_snapshot(&root->root_item))
2947                 goto out;
2948
2949         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2950         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2951             BTRFS_EXTENT_DATA_REF_KEY)
2952                 goto out;
2953
2954         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2955         if (btrfs_extent_refs(leaf, ei) !=
2956             btrfs_extent_data_ref_count(leaf, ref) ||
2957             btrfs_extent_data_ref_root(leaf, ref) !=
2958             root->root_key.objectid ||
2959             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2960             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2961                 goto out;
2962
2963         ret = 0;
2964 out:
2965         return ret;
2966 }
2967
2968 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2969                           struct btrfs_root *root,
2970                           u64 objectid, u64 offset, u64 bytenr)
2971 {
2972         struct btrfs_path *path;
2973         int ret;
2974         int ret2;
2975
2976         path = btrfs_alloc_path();
2977         if (!path)
2978                 return -ENOMEM;
2979
2980         do {
2981                 ret = check_committed_ref(trans, root, path, objectid,
2982                                           offset, bytenr);
2983                 if (ret && ret != -ENOENT)
2984                         goto out;
2985
2986                 ret2 = check_delayed_ref(trans, root, path, objectid,
2987                                          offset, bytenr);
2988         } while (ret2 == -EAGAIN);
2989
2990         if (ret2 && ret2 != -ENOENT) {
2991                 ret = ret2;
2992                 goto out;
2993         }
2994
2995         if (ret != -ENOENT || ret2 != -ENOENT)
2996                 ret = 0;
2997 out:
2998         btrfs_free_path(path);
2999         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3000                 WARN_ON(ret > 0);
3001         return ret;
3002 }
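A hedged sketch of how a nocow-style caller consumes the combined result; ino, offset and disk_bytenr stand for assumed caller context:

    ret = btrfs_cross_ref_exist(trans, root, ino, offset, disk_bytenr);
    if (ret)
        goto must_cow;  /* shared, not provably exclusive, or an error */
    /* ret == 0: only this root references the extent, so overwriting
     * in place (nocow) is safe */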
3003
3004 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3005                            struct btrfs_root *root,
3006                            struct extent_buffer *buf,
3007                            int full_backref, int inc, int for_cow)
3008 {
3009         u64 bytenr;
3010         u64 num_bytes;
3011         u64 parent;
3012         u64 ref_root;
3013         u32 nritems;
3014         struct btrfs_key key;
3015         struct btrfs_file_extent_item *fi;
3016         int i;
3017         int level;
3018         int ret = 0;
3019         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3020                             u64, u64, u64, u64, u64, u64, int);
3021
3022         ref_root = btrfs_header_owner(buf);
3023         nritems = btrfs_header_nritems(buf);
3024         level = btrfs_header_level(buf);
3025
3026         if (!root->ref_cows && level == 0)
3027                 return 0;
3028
3029         if (inc)
3030                 process_func = btrfs_inc_extent_ref;
3031         else
3032                 process_func = btrfs_free_extent;
3033
3034         if (full_backref)
3035                 parent = buf->start;
3036         else
3037                 parent = 0;
3038
3039         for (i = 0; i < nritems; i++) {
3040                 if (level == 0) {
3041                         btrfs_item_key_to_cpu(buf, &key, i);
3042                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3043                                 continue;
3044                         fi = btrfs_item_ptr(buf, i,
3045                                             struct btrfs_file_extent_item);
3046                         if (btrfs_file_extent_type(buf, fi) ==
3047                             BTRFS_FILE_EXTENT_INLINE)
3048                                 continue;
3049                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3050                         if (bytenr == 0)
3051                                 continue;
3052
3053                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3054                         key.offset -= btrfs_file_extent_offset(buf, fi);
3055                         ret = process_func(trans, root, bytenr, num_bytes,
3056                                            parent, ref_root, key.objectid,
3057                                            key.offset, for_cow);
3058                         if (ret)
3059                                 goto fail;
3060                 } else {
3061                         bytenr = btrfs_node_blockptr(buf, i);
3062                         num_bytes = btrfs_level_size(root, level - 1);
3063                         ret = process_func(trans, root, bytenr, num_bytes,
3064                                            parent, ref_root, level - 1, 0,
3065                                            for_cow);
3066                         if (ret)
3067                                 goto fail;
3068                 }
3069         }
3070         return 0;
3071 fail:
3072         return ret;
3073 }
3074
3075 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3076                   struct extent_buffer *buf, int full_backref, int for_cow)
3077 {
3078         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
3079 }
3080
3081 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3082                   struct extent_buffer *buf, int full_backref, int for_cow)
3083 {
3084         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
3085 }
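These wrappers are typically paired when a shared block is copied on write: take references through the new copy, then drop them through the old one. A simplified sketch, with buf (old) and cow (new) as assumed extent buffers and error handling elided:

    ret = btrfs_inc_ref(trans, root, cow, 1, 1);  /* full backref, for_cow */
    if (!ret)
        ret = btrfs_dec_ref(trans, root, buf, 1, 1);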
3086
3087 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3088                                  struct btrfs_root *root,
3089                                  struct btrfs_path *path,
3090                                  struct btrfs_block_group_cache *cache)
3091 {
3092         int ret;
3093         struct btrfs_root *extent_root = root->fs_info->extent_root;
3094         unsigned long bi;
3095         struct extent_buffer *leaf;
3096
3097         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3098         if (ret < 0)
3099                 goto fail;
3100         BUG_ON(ret); /* Corruption */
3101
3102         leaf = path->nodes[0];
3103         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3104         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3105         btrfs_mark_buffer_dirty(leaf);
3106         btrfs_release_path(path);
3107 fail:
3108         if (ret) {
3109                 btrfs_abort_transaction(trans, root, ret);
3110                 return ret;
3111         }
3112         return 0;
3113
3114 }
3115
3116 static struct btrfs_block_group_cache *
3117 next_block_group(struct btrfs_root *root,
3118                  struct btrfs_block_group_cache *cache)
3119 {
3120         struct rb_node *node;
3121         spin_lock(&root->fs_info->block_group_cache_lock);
3122         node = rb_next(&cache->cache_node);
3123         btrfs_put_block_group(cache);
3124         if (node) {
3125                 cache = rb_entry(node, struct btrfs_block_group_cache,
3126                                  cache_node);
3127                 btrfs_get_block_group(cache);
3128         } else
3129                 cache = NULL;
3130         spin_unlock(&root->fs_info->block_group_cache_lock);
3131         return cache;
3132 }
3133
3134 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3135                             struct btrfs_trans_handle *trans,
3136                             struct btrfs_path *path)
3137 {
3138         struct btrfs_root *root = block_group->fs_info->tree_root;
3139         struct inode *inode = NULL;
3140         u64 alloc_hint = 0;
3141         int dcs = BTRFS_DC_ERROR;
3142         int num_pages = 0;
3143         int retries = 0;
3144         int ret = 0;
3145
3146         /*
3147          * If this block group is smaller than 100 megs, don't bother caching
3148          * it.
3149          */
3150         if (block_group->key.offset < (100 * 1024 * 1024)) {
3151                 spin_lock(&block_group->lock);
3152                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3153                 spin_unlock(&block_group->lock);
3154                 return 0;
3155         }
3156
3157 again:
3158         inode = lookup_free_space_inode(root, block_group, path);
3159         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3160                 ret = PTR_ERR(inode);
3161                 btrfs_release_path(path);
3162                 goto out;
3163         }
3164
3165         if (IS_ERR(inode)) {
3166                 BUG_ON(retries);
3167                 retries++;
3168
3169                 if (block_group->ro)
3170                         goto out_free;
3171
3172                 ret = create_free_space_inode(root, trans, block_group, path);
3173                 if (ret)
3174                         goto out_free;
3175                 goto again;
3176         }
3177
3178         /* We've already set up this transaction, go ahead and exit */
3179         if (block_group->cache_generation == trans->transid &&
3180             i_size_read(inode)) {
3181                 dcs = BTRFS_DC_SETUP;
3182                 goto out_put;
3183         }
3184
3185         /*
3186          * We want to set the generation to 0 so that if anything goes wrong
3187          * from here on out we know not to trust this cache the next time we
3188          * load it.
3189          */
3190         BTRFS_I(inode)->generation = 0;
3191         ret = btrfs_update_inode(trans, root, inode);
3192         WARN_ON(ret);
3193
3194         if (i_size_read(inode) > 0) {
3195                 ret = btrfs_check_trunc_cache_free_space(root,
3196                                         &root->fs_info->global_block_rsv);
3197                 if (ret)
3198                         goto out_put;
3199
3200                 ret = btrfs_truncate_free_space_cache(root, trans, inode);
3201                 if (ret)
3202                         goto out_put;
3203         }
3204
3205         spin_lock(&block_group->lock);
3206         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3207             !btrfs_test_opt(root, SPACE_CACHE)) {
3208                 /*
3209                  * don't bother trying to write stuff out if:
3210                  * a) we're not cached, or
3211                  * b) we're mounted with the nospace_cache option.
3212                  */
3213                 dcs = BTRFS_DC_WRITTEN;
3214                 spin_unlock(&block_group->lock);
3215                 goto out_put;
3216         }
3217         spin_unlock(&block_group->lock);
3218
3219         /*
3220          * Try to preallocate enough space based on how big the block group is.
3221          * Keep in mind this has to include any pinned space which could end up
3222          * taking up quite a bit since it's not folded into the other space
3223          * cache.
3224          */
3225         num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
3226         if (!num_pages)
3227                 num_pages = 1;
3228
3229         num_pages *= 16;
3230         num_pages *= PAGE_CACHE_SIZE;
3231
3232         ret = btrfs_check_data_free_space(inode, num_pages);
3233         if (ret)
3234                 goto out_put;
3235
3236         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3237                                               num_pages, num_pages,
3238                                               &alloc_hint);
3239         if (!ret)
3240                 dcs = BTRFS_DC_SETUP;
3241         btrfs_free_reserved_data_space(inode, num_pages);
3242
3243 out_put:
3244         iput(inode);
3245 out_free:
3246         btrfs_release_path(path);
3247 out:
3248         spin_lock(&block_group->lock);
3249         if (!ret && dcs == BTRFS_DC_SETUP)
3250                 block_group->cache_generation = trans->transid;
3251         block_group->disk_cache_state = dcs;
3252         spin_unlock(&block_group->lock);
3253
3254         return ret;
3255 }
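The sizing above amounts to 16 pages of cache space per 256 MiB of block group; a worked example assuming a 1 GiB block group and 4 KiB pages:

    u64 bg_size   = 1024ULL * 1024 * 1024;                      /* 1 GiB */
    int num_pages = (int)div64_u64(bg_size, 256 * 1024 * 1024); /* 4 */

    num_pages *= 16;    /* 64 pages */
    num_pages *= 4096;  /* now a byte count: 256 KiB gets preallocated */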
3256
3257 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3258                                    struct btrfs_root *root)
3259 {
3260         struct btrfs_block_group_cache *cache;
3261         int err = 0;
3262         struct btrfs_path *path;
3263         u64 last = 0;
3264
3265         path = btrfs_alloc_path();
3266         if (!path)
3267                 return -ENOMEM;
3268
3269 again:
3270         while (1) {
3271                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3272                 while (cache) {
3273                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3274                                 break;
3275                         cache = next_block_group(root, cache);
3276                 }
3277                 if (!cache) {
3278                         if (last == 0)
3279                                 break;
3280                         last = 0;
3281                         continue;
3282                 }
3283                 err = cache_save_setup(cache, trans, path);
3284                 last = cache->key.objectid + cache->key.offset;
3285                 btrfs_put_block_group(cache);
3286         }
3287
3288         while (1) {
3289                 if (last == 0) {
3290                         err = btrfs_run_delayed_refs(trans, root,
3291                                                      (unsigned long)-1);
3292                         if (err) /* File system offline */
3293                                 goto out;
3294                 }
3295
3296                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3297                 while (cache) {
3298                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
3299                                 btrfs_put_block_group(cache);
3300                                 goto again;
3301                         }
3302
3303                         if (cache->dirty)
3304                                 break;
3305                         cache = next_block_group(root, cache);
3306                 }
3307                 if (!cache) {
3308                         if (last == 0)
3309                                 break;
3310                         last = 0;
3311                         continue;
3312                 }
3313
3314                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
3315                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
3316                 cache->dirty = 0;
3317                 last = cache->key.objectid + cache->key.offset;
3318
3319                 err = write_one_cache_group(trans, root, path, cache);
3320                 btrfs_put_block_group(cache);
3321                 if (err) /* File system offline */
3322                         goto out;
3323         }
3324
3325         while (1) {
3326                 /*
3327                  * I don't think this is needed since we're just marking our
3328                  * preallocated extent as written, but it can't hurt, so do
3329                  * it just in case.
3330                  */
3331                 if (last == 0) {
3332                         err = btrfs_run_delayed_refs(trans, root,
3333                                                      (unsigned long)-1);
3334                         if (err) /* File system offline */
3335                                 goto out;
3336                 }
3337
3338                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
3339                 while (cache) {
3340                         /*
3341                          * Really this shouldn't happen, but it could if we
3342                          * couldn't write the entire preallocated extent and
3343                          * splitting the extent resulted in a new block.
3344                          */
3345                         if (cache->dirty) {
3346                                 btrfs_put_block_group(cache);
3347                                 goto again;
3348                         }
3349                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3350                                 break;
3351                         cache = next_block_group(root, cache);
3352                 }
3353                 if (!cache) {
3354                         if (last == 0)
3355                                 break;
3356                         last = 0;
3357                         continue;
3358                 }
3359
3360                 err = btrfs_write_out_cache(root, trans, cache, path);
3361
3362                 /*
3363                  * If we didn't have an error then the cache state is still
3364                  * NEED_WRITE, so we can set it to WRITTEN.
3365                  */
3366                 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3367                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3368                 last = cache->key.objectid + cache->key.offset;
3369                 btrfs_put_block_group(cache);
3370         }
3371 out:
3372
3373         btrfs_free_path(path);
3374         return err;
3375 }
3376
3377 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3378 {
3379         struct btrfs_block_group_cache *block_group;
3380         int readonly = 0;
3381
3382         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3383         if (!block_group || block_group->ro)
3384                 readonly = 1;
3385         if (block_group)
3386                 btrfs_put_block_group(block_group);
3387         return readonly;
3388 }
3389
3390 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3391                              u64 total_bytes, u64 bytes_used,
3392                              struct btrfs_space_info **space_info)
3393 {
3394         struct btrfs_space_info *found;
3395         int i;
3396         int factor;
3397         int ret;
3398
3399         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3400                      BTRFS_BLOCK_GROUP_RAID10))
3401                 factor = 2;
3402         else
3403                 factor = 1;
3404
3405         found = __find_space_info(info, flags);
3406         if (found) {
3407                 spin_lock(&found->lock);
3408                 found->total_bytes += total_bytes;
3409                 found->disk_total += total_bytes * factor;
3410                 found->bytes_used += bytes_used;
3411                 found->disk_used += bytes_used * factor;
3412                 found->full = 0;
3413                 spin_unlock(&found->lock);
3414                 *space_info = found;
3415                 return 0;
3416         }
3417         found = kzalloc(sizeof(*found), GFP_NOFS);
3418         if (!found)
3419                 return -ENOMEM;
3420
3421         ret = percpu_counter_init(&found->total_bytes_pinned, 0);
3422         if (ret) {
3423                 kfree(found);
3424                 return ret;
3425         }
3426
3427         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3428                 INIT_LIST_HEAD(&found->block_groups[i]);
3429         init_rwsem(&found->groups_sem);
3430         spin_lock_init(&found->lock);
3431         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3432         found->total_bytes = total_bytes;
3433         found->disk_total = total_bytes * factor;
3434         found->bytes_used = bytes_used;
3435         found->disk_used = bytes_used * factor;
3436         found->bytes_pinned = 0;
3437         found->bytes_reserved = 0;
3438         found->bytes_readonly = 0;
3439         found->bytes_may_use = 0;
3440         found->full = 0;
3441         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3442         found->chunk_alloc = 0;
3443         found->flush = 0;
3444         init_waitqueue_head(&found->wait);
3445         *space_info = found;
3446         list_add_rcu(&found->list, &info->space_info);
3447         if (flags & BTRFS_BLOCK_GROUP_DATA)
3448                 info->data_sinfo = found;
3449         return 0;
3450 }
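The factor doubles the on-disk accounting for mirrored profiles. A worked example with assumed numbers, adding 1 GiB of RAID1 space:

    u64 total_bytes = 1024ULL << 20;  /* 1 GiB, assumed */
    int factor = 2;                   /* DUP/RAID1/RAID10 */

    found->total_bytes += total_bytes;           /* +1 GiB logical */
    found->disk_total  += total_bytes * factor;  /* +2 GiB on disk */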
3451
3452 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3453 {
3454         u64 extra_flags = chunk_to_extended(flags) &
3455                                 BTRFS_EXTENDED_PROFILE_MASK;
3456
3457         write_seqlock(&fs_info->profiles_lock);
3458         if (flags & BTRFS_BLOCK_GROUP_DATA)
3459                 fs_info->avail_data_alloc_bits |= extra_flags;
3460         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3461                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3462         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3463                 fs_info->avail_system_alloc_bits |= extra_flags;
3464         write_sequnlock(&fs_info->profiles_lock);
3465 }
3466
3467 /*
3468  * returns target flags in extended format or 0 if restripe for this
3469  * chunk_type is not in progress
3470  *
3471  * should be called with either volume_mutex or balance_lock held
3472  */
3473 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3474 {
3475         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3476         u64 target = 0;
3477
3478         if (!bctl)
3479                 return 0;
3480
3481         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3482             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3483                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3484         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3485                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3486                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3487         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3488                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3489                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3490         }
3491
3492         return target;
3493 }
3494
3495 /*
3496  * @flags: available profiles in extended format (see ctree.h)
3497  *
3498  * Returns reduced profile in chunk format.  If profile changing is in
3499  * progress (either running or paused) picks the target profile (if it's
3500  * already available), otherwise falls back to plain reducing.
3501  */
3502 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3503 {
3504         /*
3505          * we add in the count of missing devices because we want
3506          * to make sure that any RAID levels on a degraded FS
3507          * continue to be honored.
3508          */
3509         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3510                 root->fs_info->fs_devices->missing_devices;
3511         u64 target;
3512         u64 tmp;
3513
3514         /*
3515          * see if restripe for this chunk_type is in progress, if so
3516          * try to reduce to the target profile
3517          */
3518         spin_lock(&root->fs_info->balance_lock);
3519         target = get_restripe_target(root->fs_info, flags);
3520         if (target) {
3521                 /* pick target profile only if it's already available */
3522                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3523                         spin_unlock(&root->fs_info->balance_lock);
3524                         return extended_to_chunk(target);
3525                 }
3526         }
3527         spin_unlock(&root->fs_info->balance_lock);
3528
3529         /* First, mask out the RAID levels which aren't possible */
3530         if (num_devices == 1)
3531                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3532                            BTRFS_BLOCK_GROUP_RAID5);
3533         if (num_devices < 3)
3534                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3535         if (num_devices < 4)
3536                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3537
3538         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3539                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3540                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3541         flags &= ~tmp;
3542
3543         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3544                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3545         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3546                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3547         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3548                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3549         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3550                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3551         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3552                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3553
3554         return extended_to_chunk(flags | tmp);
3555 }
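A worked example of the reduction under assumed inputs, two rw devices and metadata profiles RAID1|RAID0 available:

    u64 flags = BTRFS_BLOCK_GROUP_METADATA |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0;
    /* num_devices == 2: RAID6 (needs >= 3) and RAID10 (needs >= 4) get
     * masked out, and the if/else ladder prefers RAID1 over RAID0, so
     * the result is METADATA|RAID1, converted back to chunk format. */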
3556
3557 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3558 {
3559         unsigned seq;
3560
3561         do {
3562                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3563
3564                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3565                         flags |= root->fs_info->avail_data_alloc_bits;
3566                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3567                         flags |= root->fs_info->avail_system_alloc_bits;
3568                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3569                         flags |= root->fs_info->avail_metadata_alloc_bits;
3570         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3571
3572         return btrfs_reduce_alloc_profile(root, flags);
3573 }
3574
3575 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3576 {
3577         u64 flags;
3578         u64 ret;
3579
3580         if (data)
3581                 flags = BTRFS_BLOCK_GROUP_DATA;
3582         else if (root == root->fs_info->chunk_root)
3583                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3584         else
3585                 flags = BTRFS_BLOCK_GROUP_METADATA;
3586
3587         ret = get_alloc_profile(root, flags);
3588         return ret;
3589 }
3590
3591 /*
3592  * This will check the space that the inode allocates from to make sure we have
3593  * enough space for bytes.
3594  */
3595 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3596 {
3597         struct btrfs_space_info *data_sinfo;
3598         struct btrfs_root *root = BTRFS_I(inode)->root;
3599         struct btrfs_fs_info *fs_info = root->fs_info;
3600         u64 used;
3601         int ret = 0, committed = 0, alloc_chunk = 1;
3602
3603         /* make sure bytes are sectorsize aligned */
3604         bytes = ALIGN(bytes, root->sectorsize);
3605
3606         if (root == root->fs_info->tree_root ||
3607             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3608                 alloc_chunk = 0;
3609                 committed = 1;
3610         }
3611
3612         data_sinfo = fs_info->data_sinfo;
3613         if (!data_sinfo)
3614                 goto alloc;
3615
3616 again:
3617         /* make sure we have enough space to handle the data first */
3618         spin_lock(&data_sinfo->lock);
3619         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3620                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3621                 data_sinfo->bytes_may_use;
3622
3623         if (used + bytes > data_sinfo->total_bytes) {
3624                 struct btrfs_trans_handle *trans;
3625
3626                 /*
3627                  * if we don't have enough free bytes in this space then we need
3628                  * to alloc a new chunk.
3629                  */
3630                 if (!data_sinfo->full && alloc_chunk) {
3631                         u64 alloc_target;
3632
3633                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3634                         spin_unlock(&data_sinfo->lock);
3635 alloc:
3636                         alloc_target = btrfs_get_alloc_profile(root, 1);
3637                         trans = btrfs_join_transaction(root);
3638                         if (IS_ERR(trans))
3639                                 return PTR_ERR(trans);
3640
3641                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3642                                              alloc_target,
3643                                              CHUNK_ALLOC_NO_FORCE);
3644                         btrfs_end_transaction(trans, root);
3645                         if (ret < 0) {
3646                                 if (ret != -ENOSPC)
3647                                         return ret;
3648                                 else
3649                                         goto commit_trans;
3650                         }
3651
3652                         if (!data_sinfo)
3653                                 data_sinfo = fs_info->data_sinfo;
3654
3655                         goto again;
3656                 }
3657
3658                 /*
3659                  * If we don't have enough pinned space to deal with this
3660                  * allocation don't bother committing the transaction.
3661                  */
3662                 if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
3663                                            bytes) < 0)
3664                         committed = 1;
3665                 spin_unlock(&data_sinfo->lock);
3666
3667                 /* commit the current transaction and try again */
3668 commit_trans:
3669                 if (!committed &&
3670                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3671                         committed = 1;
3672
3673                         trans = btrfs_join_transaction(root);
3674                         if (IS_ERR(trans))
3675                                 return PTR_ERR(trans);
3676                         ret = btrfs_commit_transaction(trans, root);
3677                         if (ret)
3678                                 return ret;
3679                         goto again;
3680                 }
3681
3682                 return -ENOSPC;
3683         }
3684         data_sinfo->bytes_may_use += bytes;
3685         trace_btrfs_space_reservation(root->fs_info, "space_info",
3686                                       data_sinfo->flags, bytes, 1);
3687         spin_unlock(&data_sinfo->lock);
3688
3689         return 0;
3690 }
3691
3692 /*
3693  * Called if we need to clear a data reservation for this inode.
3694  */
3695 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3696 {
3697         struct btrfs_root *root = BTRFS_I(inode)->root;
3698         struct btrfs_space_info *data_sinfo;
3699
3700         /* make sure bytes are sectorsize aligned */
3701         bytes = ALIGN(bytes, root->sectorsize);
3702
3703         data_sinfo = root->fs_info->data_sinfo;
3704         spin_lock(&data_sinfo->lock);
3705         WARN_ON(data_sinfo->bytes_may_use < bytes);
3706         data_sinfo->bytes_may_use -= bytes;
3707         trace_btrfs_space_reservation(root->fs_info, "space_info",
3708                                       data_sinfo->flags, bytes, 0);
3709         spin_unlock(&data_sinfo->lock);
3710 }
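These two functions act as a reserve/undo pair, as cache_save_setup() above shows. A generic sketch of the pattern, where the middle step is a hypothetical placeholder:

    ret = btrfs_check_data_free_space(inode, num_bytes);
    if (ret)
        return ret;                      /* usually -ENOSPC */

    ret = do_something_with_the_space(); /* hypothetical step */
    if (ret)
        btrfs_free_reserved_data_space(inode, num_bytes);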
3711
3712 static void force_metadata_allocation(struct btrfs_fs_info *info)
3713 {
3714         struct list_head *head = &info->space_info;
3715         struct btrfs_space_info *found;
3716
3717         rcu_read_lock();
3718         list_for_each_entry_rcu(found, head, list) {
3719                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3720                         found->force_alloc = CHUNK_ALLOC_FORCE;
3721         }
3722         rcu_read_unlock();
3723 }
3724
3725 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
3726 {
3727         return (global->size << 1);
3728 }
3729
3730 static int should_alloc_chunk(struct btrfs_root *root,
3731                               struct btrfs_space_info *sinfo, int force)
3732 {
3733         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3734         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3735         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3736         u64 thresh;
3737
3738         if (force == CHUNK_ALLOC_FORCE)
3739                 return 1;
3740
3741         /*
3742          * We need to take into account the global rsv because for all intents
3743          * and purposes it's used space.  Don't worry about locking the
3744          * global_rsv, it doesn't change except when the transaction commits.
3745          */
3746         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
3747                 num_allocated += calc_global_rsv_need_space(global_rsv);
3748
3749         /*
3750          * in limited mode, we want to have some free space up to
3751          * about 1% of the FS size.
3752          */
3753         if (force == CHUNK_ALLOC_LIMITED) {
3754                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3755                 thresh = max_t(u64, 64 * 1024 * 1024,
3756                                div_factor_fine(thresh, 1));
3757
3758                 if (num_bytes - num_allocated < thresh)
3759                         return 1;
3760         }
3761
3762         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
3763                 return 0;
3764         return 1;
3765 }
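In limited mode the threshold works out to max(64 MiB, 1% of the filesystem); a worked example with an assumed 4 GiB filesystem:

    u64 thresh = 4ULL << 30;  /* assumed FS size */
    thresh = max_t(u64, 64 * 1024 * 1024, div_factor_fine(thresh, 1));
    /* 1% of 4 GiB is ~41 MiB, so thresh = 64 MiB: allocate a chunk if
     * unallocated space in this space_info drops below 64 MiB */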
3766
3767 static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3768 {
3769         u64 num_dev;
3770
3771         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
3772                     BTRFS_BLOCK_GROUP_RAID0 |
3773                     BTRFS_BLOCK_GROUP_RAID5 |
3774                     BTRFS_BLOCK_GROUP_RAID6))
3775                 num_dev = root->fs_info->fs_devices->rw_devices;
3776         else if (type & BTRFS_BLOCK_GROUP_RAID1)
3777                 num_dev = 2;
3778         else
3779                 num_dev = 1;    /* DUP or single */
3780
3781         /* metadata for updating the device items and the chunk tree */
3782         return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3783 }
3784
3785 static void check_system_chunk(struct btrfs_trans_handle *trans,
3786                                struct btrfs_root *root, u64 type)
3787 {
3788         struct btrfs_space_info *info;
3789         u64 left;
3790         u64 thresh;
3791
3792         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3793         spin_lock(&info->lock);
3794         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3795                 info->bytes_reserved - info->bytes_readonly;
3796         spin_unlock(&info->lock);
3797
3798         thresh = get_system_chunk_thresh(root, type);
3799         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3800                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
3801                         left, thresh, type);
3802                 dump_space_info(info, 0, 0);
3803         }
3804
3805         if (left < thresh) {
3806                 u64 flags;
3807
3808                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3809                 btrfs_alloc_chunk(trans, root, flags);
3810         }
3811 }
3812
3813 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3814                           struct btrfs_root *extent_root, u64 flags, int force)
3815 {
3816         struct btrfs_space_info *space_info;
3817         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3818         int wait_for_alloc = 0;
3819         int ret = 0;
3820
3821         /* Don't re-enter if we're already allocating a chunk */
3822         if (trans->allocating_chunk)
3823                 return -ENOSPC;
3824
3825         space_info = __find_space_info(extent_root->fs_info, flags);
3826         if (!space_info) {
3827                 ret = update_space_info(extent_root->fs_info, flags,
3828                                         0, 0, &space_info);
3829                 BUG_ON(ret); /* -ENOMEM */
3830         }
3831         BUG_ON(!space_info); /* Logic error */
3832
3833 again:
3834         spin_lock(&space_info->lock);
3835         if (force < space_info->force_alloc)
3836                 force = space_info->force_alloc;
3837         if (space_info->full) {
3838                 if (should_alloc_chunk(extent_root, space_info, force))
3839                         ret = -ENOSPC;
3840                 else
3841                         ret = 0;
3842                 spin_unlock(&space_info->lock);
3843                 return ret;
3844         }
3845
3846         if (!should_alloc_chunk(extent_root, space_info, force)) {
3847                 spin_unlock(&space_info->lock);
3848                 return 0;
3849         } else if (space_info->chunk_alloc) {
3850                 wait_for_alloc = 1;
3851         } else {
3852                 space_info->chunk_alloc = 1;
3853         }
3854
3855         spin_unlock(&space_info->lock);
3856
3857         mutex_lock(&fs_info->chunk_mutex);
3858
3859         /*
3860          * The chunk_mutex is held throughout the entirety of a chunk
3861          * allocation, so once we've acquired the chunk_mutex we know that the
3862          * other guy is done and we need to recheck and see if we should
3863          * allocate.
3864          */
3865         if (wait_for_alloc) {
3866                 mutex_unlock(&fs_info->chunk_mutex);
3867                 wait_for_alloc = 0;
3868                 goto again;
3869         }
3870
3871         trans->allocating_chunk = true;
3872
3873         /*
3874          * If we have mixed data/metadata chunks we want to make sure we keep
3875          * allocating mixed chunks instead of individual chunks.
3876          */
3877         if (btrfs_mixed_space_info(space_info))
3878                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3879
3880         /*
3881          * if we're doing a data chunk, go ahead and make sure that
3882          * we keep a reasonable number of metadata chunks allocated in the
3883          * FS as well.
3884          */
3885         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3886                 fs_info->data_chunk_allocations++;
3887                 if (!(fs_info->data_chunk_allocations %
3888                       fs_info->metadata_ratio))
3889                         force_metadata_allocation(fs_info);
3890         }
3891
3892         /*
3893          * Check if we have enough space in SYSTEM chunk because we may need
3894          * to update devices.
3895          */
3896         check_system_chunk(trans, extent_root, flags);
3897
3898         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3899         trans->allocating_chunk = false;
3900
3901         spin_lock(&space_info->lock);
3902         if (ret < 0 && ret != -ENOSPC)
3903                 goto out;
3904         if (ret)
3905                 space_info->full = 1;
3906         else
3907                 ret = 1;
3908
3909         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3910 out:
3911         space_info->chunk_alloc = 0;
3912         spin_unlock(&space_info->lock);
3913         mutex_unlock(&fs_info->chunk_mutex);
3914         return ret;
3915 }
3916
3917 static int can_overcommit(struct btrfs_root *root,
3918                           struct btrfs_space_info *space_info, u64 bytes,
3919                           enum btrfs_reserve_flush_enum flush)
3920 {
3921         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3922         u64 profile = btrfs_get_alloc_profile(root, 0);
3923         u64 space_size;
3924         u64 avail;
3925         u64 used;
3926
3927         used = space_info->bytes_used + space_info->bytes_reserved +
3928                 space_info->bytes_pinned + space_info->bytes_readonly;
3929
3930         /*
3931          * We only want to allow overcommitting if we have lots of actual space
3932          * free. If we don't have enough space to cover the global reserve, we
3933          * could hit a real ENOSPC problem when trying to allocate a chunk or
3934          * make some other similarly important allocation.
3935          */
3936         spin_lock(&global_rsv->lock);
3937         space_size = calc_global_rsv_need_space(global_rsv);
3938         spin_unlock(&global_rsv->lock);
3939         if (used + space_size >= space_info->total_bytes)
3940                 return 0;
3941
3942         used += space_info->bytes_may_use;
3943
3944         spin_lock(&root->fs_info->free_chunk_lock);
3945         avail = root->fs_info->free_chunk_space;
3946         spin_unlock(&root->fs_info->free_chunk_lock);
3947
3948         /*
3949          * If we have dup, raid1 or raid10 then only half of the free
3950          * space is actually usable.  For raid56, the space info used
3951          * doesn't include the parity drive, so we don't have to
3952          * change the math.
3953          */
3954         if (profile & (BTRFS_BLOCK_GROUP_DUP |
3955                        BTRFS_BLOCK_GROUP_RAID1 |
3956                        BTRFS_BLOCK_GROUP_RAID10))
3957                 avail >>= 1;
3958
3959         /*
3960          * If we aren't flushing everything, allow overcommitting up to
3961          * half of the free space. If we can flush everything, be more
3962          * conservative and allow overcommitting only 1/8 of it.
3963          */
3964         if (flush == BTRFS_RESERVE_FLUSH_ALL)
3965                 avail >>= 3;
3966         else
3967                 avail >>= 1;
3968
3969         if (used + bytes < space_info->total_bytes + avail)
3970                 return 1;
3971         return 0;
3972 }
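A worked example of the overcommit math, with assumed inputs: 10 GiB of unallocated chunk space, a RAID1 metadata profile, and flush == BTRFS_RESERVE_FLUSH_ALL:

    u64 avail = 10ULL << 30;  /* assumed free_chunk_space */
    avail >>= 1;              /* RAID1: only half is usable -> 5 GiB */
    avail >>= 3;              /* FLUSH_ALL: overcommit only 1/8 -> 640 MiB */
    /* the reservation is allowed while used + bytes stays below
     * space_info->total_bytes + 640 MiB */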
3973
3974 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
3975                                          unsigned long nr_pages)
3976 {
3977         struct super_block *sb = root->fs_info->sb;
3978
3979         if (down_read_trylock(&sb->s_umount)) {
3980                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
3981                 up_read(&sb->s_umount);
3982         } else {
3983                 /*
3984                  * We needn't worry about the filesystem going from r/w to
3985                  * r/o even though we don't take ->s_umount, because the
3986                  * filesystem should guarantee that the delalloc inode list
3987                  * is empty once it becomes read-only (all dirty pages have
3988                  * been written to disk by then).
3989                  */
3990                 btrfs_start_all_delalloc_inodes(root->fs_info, 0);
3991                 if (!current->journal_info)
3992                         btrfs_wait_all_ordered_extents(root->fs_info);
3993         }
3994 }
3995
3996 /*
3997  * shrink metadata reservation for delalloc
3998  */
3999 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4000                             bool wait_ordered)
4001 {
4002         struct btrfs_block_rsv *block_rsv;
4003         struct btrfs_space_info *space_info;
4004         struct btrfs_trans_handle *trans;
4005         u64 delalloc_bytes;
4006         u64 max_reclaim;
4007         long time_left;
4008         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
4009         int loops = 0;
4010         enum btrfs_reserve_flush_enum flush;
4011
4012         trans = (struct btrfs_trans_handle *)current->journal_info;
4013         block_rsv = &root->fs_info->delalloc_block_rsv;
4014         space_info = block_rsv->space_info;
4015
4016         smp_mb();
4017         delalloc_bytes = percpu_counter_sum_positive(
4018                                                 &root->fs_info->delalloc_bytes);
4019         if (delalloc_bytes == 0) {
4020                 if (trans)
4021                         return;
4022                 btrfs_wait_all_ordered_extents(root->fs_info);
4023                 return;
4024         }
4025
4026         while (delalloc_bytes && loops < 3) {
4027                 max_reclaim = min(delalloc_bytes, to_reclaim);
4028                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4029                 btrfs_writeback_inodes_sb_nr(root, nr_pages);
4030                 /*
4031                  * We need to wait for the async pages to actually start before
4032                  * we do anything.
4033                  */
4034                 wait_event(root->fs_info->async_submit_wait,
4035                            !atomic_read(&root->fs_info->async_delalloc_pages));
4036
4037                 if (!trans)
4038                         flush = BTRFS_RESERVE_FLUSH_ALL;
4039                 else
4040                         flush = BTRFS_RESERVE_NO_FLUSH;
4041                 spin_lock(&space_info->lock);
4042                 if (can_overcommit(root, space_info, orig, flush)) {
4043                         spin_unlock(&space_info->lock);
4044                         break;
4045                 }
4046                 spin_unlock(&space_info->lock);
4047
4048                 loops++;
4049                 if (wait_ordered && !trans) {
4050                         btrfs_wait_all_ordered_extents(root->fs_info);
4051                 } else {
4052                         time_left = schedule_timeout_killable(1);
4053                         if (time_left)
4054                                 break;
4055                 }
4056                 smp_mb();
4057                 delalloc_bytes = percpu_counter_sum_positive(
4058                                                 &root->fs_info->delalloc_bytes);
4059         }
4060 }
4061
4062 /**
4063  * may_commit_transaction - possibly commit the transaction if it's OK to
4064  * @root: the root we're allocating for
4065  * @bytes: the number of bytes we want to reserve
4066  * @force: force the commit
4067  *
4068  * This will check to make sure that committing the transaction will actually
4069  * get us somewhere and then commit the transaction if it does.  Otherwise it
4070  * will return -ENOSPC.
4071  */
4072 static int may_commit_transaction(struct btrfs_root *root,
4073                                   struct btrfs_space_info *space_info,
4074                                   u64 bytes, int force)
4075 {
4076         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4077         struct btrfs_trans_handle *trans;
4078
4079         trans = (struct btrfs_trans_handle *)current->journal_info;
4080         if (trans)
4081                 return -EAGAIN;
4082
4083         if (force)
4084                 goto commit;
4085
4086         /* See if there is enough pinned space to make this reservation */
4087         spin_lock(&space_info->lock);
4088         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4089                                    bytes) >= 0) {
4090                 spin_unlock(&space_info->lock);
4091                 goto commit;
4092         }
4093         spin_unlock(&space_info->lock);
4094
4095         /*
4096          * See if there is some space in the delayed insertion reservation for
4097          * this reservation.
4098          */
4099         if (space_info != delayed_rsv->space_info)
4100                 return -ENOSPC;
4101
4102         spin_lock(&space_info->lock);
4103         spin_lock(&delayed_rsv->lock);
4104         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4105                                    bytes - delayed_rsv->size) >= 0) {
4106                 spin_unlock(&delayed_rsv->lock);
4107                 spin_unlock(&space_info->lock);
4108                 return -ENOSPC;
4109         }
4110         spin_unlock(&delayed_rsv->lock);
4111         spin_unlock(&space_info->lock);
4112
4113 commit:
4114         trans = btrfs_join_transaction(root);
4115         if (IS_ERR(trans))
4116                 return -ENOSPC;
4117
4118         return btrfs_commit_transaction(trans, root);
4119 }
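
/*
 * Illustrative trace of the fast path above (invented numbers): a caller
 * needing 8M with total_bytes_pinned at 10M passes the first
 * percpu_counter_compare() check, so committing will unpin at least as
 * much as the reservation needs and we jump straight to the commit
 * label.  A caller that already holds a transaction handle never gets
 * this far; it sees -EAGAIN at the top instead.
 */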
4120
4121 enum flush_state {
4122         FLUSH_DELAYED_ITEMS_NR  =       1,
4123         FLUSH_DELAYED_ITEMS     =       2,
4124         FLUSH_DELALLOC          =       3,
4125         FLUSH_DELALLOC_WAIT     =       4,
4126         ALLOC_CHUNK             =       5,
4127         COMMIT_TRANS            =       6,
4128 };
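
/*
 * reserve_metadata_bytes() below walks these states in ascending order
 * (flush_state++ on each failed pass), so the cheap options, such as
 * running a handful of delayed items, are tried before expensive ones
 * like allocating a chunk or committing the transaction.
 */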
4129
4130 static int flush_space(struct btrfs_root *root,
4131                        struct btrfs_space_info *space_info, u64 num_bytes,
4132                        u64 orig_bytes, int state)
4133 {
4134         struct btrfs_trans_handle *trans;
4135         int nr;
4136         int ret = 0;
4137
4138         switch (state) {
4139         case FLUSH_DELAYED_ITEMS_NR:
4140         case FLUSH_DELAYED_ITEMS:
4141                 if (state == FLUSH_DELAYED_ITEMS_NR) {
4142                         u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
4143
4144                         nr = (int)div64_u64(num_bytes, bytes);
4145                         if (!nr)
4146                                 nr = 1;
4147                         nr *= 2;
4148                 } else {
4149                         nr = -1;
4150                 }
4151                 trans = btrfs_join_transaction(root);
4152                 if (IS_ERR(trans)) {
4153                         ret = PTR_ERR(trans);
4154                         break;
4155                 }
4156                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4157                 btrfs_end_transaction(trans, root);
4158                 break;
4159         case FLUSH_DELALLOC:
4160         case FLUSH_DELALLOC_WAIT:
4161                 shrink_delalloc(root, num_bytes, orig_bytes,
4162                                 state == FLUSH_DELALLOC_WAIT);
4163                 break;
4164         case ALLOC_CHUNK:
4165                 trans = btrfs_join_transaction(root);
4166                 if (IS_ERR(trans)) {
4167                         ret = PTR_ERR(trans);
4168                         break;
4169                 }
4170                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4171                                      btrfs_get_alloc_profile(root, 0),
4172                                      CHUNK_ALLOC_NO_FORCE);
4173                 btrfs_end_transaction(trans, root);
4174                 if (ret == -ENOSPC)
4175                         ret = 0;
4176                 break;
4177         case COMMIT_TRANS:
4178                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4179                 break;
4180         default:
4181                 ret = -ENOSPC;
4182                 break;
4183         }
4184
4185         return ret;
4186 }
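
/*
 * Worked example of the FLUSH_DELAYED_ITEMS_NR sizing above, with
 * made-up numbers: if btrfs_calc_trans_metadata_size(root, 1) comes to
 * 64K on a given filesystem (the exact value depends on the node size)
 * and num_bytes is 1M, then
 *
 *	nr = 1M / 64K = 16, doubled to 32,
 *
 * so btrfs_run_delayed_items_nr() flushes roughly twice as many delayed
 * items as the reservation strictly needs, giving some headroom.
 */
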
4187 /**
4188  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4189  * @root - the root we're allocating for
4190  * @block_rsv - the block_rsv we're allocating for
4191  * @orig_bytes - the number of bytes we want
4192  * @flush - whether or not we can flush to make our reservation
4193  *
4194  * This will reserve orig_bytes number of bytes from the space info associated
4195  * with the block_rsv.  If there is not enough space it will make an attempt to
4196  * flush out space to make room.  It will do this by flushing delalloc if
4197  * possible or committing the transaction.  If flush is
4198  * BTRFS_RESERVE_NO_FLUSH then no attempts to regain reservations will be
4199  * made and this will fail if there is not enough space already.
4200  */
4201 static int reserve_metadata_bytes(struct btrfs_root *root,
4202                                   struct btrfs_block_rsv *block_rsv,
4203                                   u64 orig_bytes,
4204                                   enum btrfs_reserve_flush_enum flush)
4205 {
4206         struct btrfs_space_info *space_info = block_rsv->space_info;
4207         u64 used;
4208         u64 num_bytes = orig_bytes;
4209         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4210         int ret = 0;
4211         bool flushing = false;
4212
4213 again:
4214         ret = 0;
4215         spin_lock(&space_info->lock);
4216         /*
4217          * We only want to wait if somebody other than us is flushing and we
4218          * are actually allowed to flush all things.
4219          */
4220         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4221                space_info->flush) {
4222                 spin_unlock(&space_info->lock);
4223                 /*
4224                  * If we have a trans handle we can't wait because the flusher
4225                  * may have to commit the transaction, which would mean we would
4226                  * deadlock since we are waiting for the flusher to finish, but
4227                  * hold the current transaction open.
4228                  */
4229                 if (current->journal_info)
4230                         return -EAGAIN;
4231                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4232                 /* Must have been killed, return */
4233                 if (ret)
4234                         return -EINTR;
4235
4236                 spin_lock(&space_info->lock);
4237         }
4238
4239         ret = -ENOSPC;
4240         used = space_info->bytes_used + space_info->bytes_reserved +
4241                 space_info->bytes_pinned + space_info->bytes_readonly +
4242                 space_info->bytes_may_use;
4243
4244         /*
4245          * The idea here is that if we've not already over-reserved the
4246          * block group then we can go ahead and save our reservation first
4247          * and then start flushing if we need to.  Otherwise, if we've
4248          * already overcommitted, let's start flushing stuff first and then
4249          * come back and try to make our reservation.
4250          */
4251         if (used <= space_info->total_bytes) {
4252                 if (used + orig_bytes <= space_info->total_bytes) {
4253                         space_info->bytes_may_use += orig_bytes;
4254                         trace_btrfs_space_reservation(root->fs_info,
4255                                 "space_info", space_info->flags, orig_bytes, 1);
4256                         ret = 0;
4257                 } else {
4258                         /*
4259                          * OK, set num_bytes to orig_bytes since we aren't
4260                          * overcommitted; this way we only try to reclaim what
4261                          * we need.
4262                          */
4263                         num_bytes = orig_bytes;
4264                 }
4265         } else {
4266                 /*
4267                  * OK, we're overcommitted: set num_bytes to the overcommitted
4268                  * amount plus the number of bytes that we need for this
4269                  * reservation.
4270                  */
4271                 num_bytes = used - space_info->total_bytes +
4272                         (orig_bytes * 2);
4273         }
4274
4275         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4276                 space_info->bytes_may_use += orig_bytes;
4277                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4278                                               space_info->flags, orig_bytes,
4279                                               1);
4280                 ret = 0;
4281         }
4282
4283         /*
4284          * Couldn't make our reservation, save our place so while we're trying
4285          * to reclaim space we can actually use it instead of somebody else
4286          * stealing it from us.
4287          *
4288          * We make the other tasks wait for the flush only when we can flush
4289          * all things.
4290          */
4291         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4292                 flushing = true;
4293                 space_info->flush = 1;
4294         }
4295
4296         spin_unlock(&space_info->lock);
4297
4298         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4299                 goto out;
4300
4301         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4302                           flush_state);
4303         flush_state++;
4304
4305         /*
4306          * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4307          * would happen.  So skip the delalloc flush states.
4308          */
4309         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4310             (flush_state == FLUSH_DELALLOC ||
4311              flush_state == FLUSH_DELALLOC_WAIT))
4312                 flush_state = ALLOC_CHUNK;
4313
4314         if (!ret)
4315                 goto again;
4316         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4317                  flush_state < COMMIT_TRANS)
4318                 goto again;
4319         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4320                  flush_state <= COMMIT_TRANS)
4321                 goto again;
4322
4323 out:
4324         if (ret == -ENOSPC &&
4325             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4326                 struct btrfs_block_rsv *global_rsv =
4327                         &root->fs_info->global_block_rsv;
4328
4329                 if (block_rsv != global_rsv &&
4330                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4331                         ret = 0;
4332         }
4333         if (flushing) {
4334                 spin_lock(&space_info->lock);
4335                 space_info->flush = 0;
4336                 wake_up_all(&space_info->wait);
4337                 spin_unlock(&space_info->lock);
4338         }
4339         return ret;
4340 }
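
/*
 * Sketch of a typical call, mirroring btrfs_block_rsv_add() below
 * (illustrative, not a new API):
 *
 *	ret = reserve_metadata_bytes(root, block_rsv, num_bytes,
 *				     BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret)
 *		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 *
 * With BTRFS_RESERVE_NO_FLUSH the function fails fast with -ENOSPC when
 * the space_info cannot cover the request; with FLUSH_ALL it may walk
 * every flush state, up to and including a transaction commit, before
 * giving up.
 */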
4341
4342 static struct btrfs_block_rsv *get_block_rsv(
4343                                         const struct btrfs_trans_handle *trans,
4344                                         const struct btrfs_root *root)
4345 {
4346         struct btrfs_block_rsv *block_rsv = NULL;
4347
4348         if (root->ref_cows)
4349                 block_rsv = trans->block_rsv;
4350
4351         if (root == root->fs_info->csum_root && trans->adding_csums)
4352                 block_rsv = trans->block_rsv;
4353
4354         if (root == root->fs_info->uuid_root)
4355                 block_rsv = trans->block_rsv;
4356
4357         if (!block_rsv)
4358                 block_rsv = root->block_rsv;
4359
4360         if (!block_rsv)
4361                 block_rsv = &root->fs_info->empty_block_rsv;
4362
4363         return block_rsv;
4364 }
4365
4366 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4367                                u64 num_bytes)
4368 {
4369         int ret = -ENOSPC;
4370         spin_lock(&block_rsv->lock);
4371         if (block_rsv->reserved >= num_bytes) {
4372                 block_rsv->reserved -= num_bytes;
4373                 if (block_rsv->reserved < block_rsv->size)
4374                         block_rsv->full = 0;
4375                 ret = 0;
4376         }
4377         spin_unlock(&block_rsv->lock);
4378         return ret;
4379 }
4380
4381 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4382                                 u64 num_bytes, int update_size)
4383 {
4384         spin_lock(&block_rsv->lock);
4385         block_rsv->reserved += num_bytes;
4386         if (update_size)
4387                 block_rsv->size += num_bytes;
4388         else if (block_rsv->reserved >= block_rsv->size)
4389                 block_rsv->full = 1;
4390         spin_unlock(&block_rsv->lock);
4391 }
4392
4393 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4394                              struct btrfs_block_rsv *dest, u64 num_bytes,
4395                              int min_factor)
4396 {
4397         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4398         u64 min_bytes;
4399
4400         if (global_rsv->space_info != dest->space_info)
4401                 return -ENOSPC;
4402
4403         spin_lock(&global_rsv->lock);
4404         min_bytes = div_factor(global_rsv->size, min_factor);
4405         if (global_rsv->reserved < min_bytes + num_bytes) {
4406                 spin_unlock(&global_rsv->lock);
4407                 return -ENOSPC;
4408         }
4409         global_rsv->reserved -= num_bytes;
4410         if (global_rsv->reserved < global_rsv->size)
4411                 global_rsv->full = 0;
4412         spin_unlock(&global_rsv->lock);
4413
4414         block_rsv_add_bytes(dest, num_bytes, 1);
4415         return 0;
4416 }
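
/*
 * Worked example of the min_factor check above (illustrative numbers):
 * with a 512M global reserve and min_factor == 5, div_factor() gives
 * min_bytes = 512M * 5 / 10 = 256M, so migrating 32M only succeeds if
 * global_rsv->reserved is at least 288M; the global reserve is never
 * drained below half full on behalf of this caller.
 */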
4417
4418 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4419                                     struct btrfs_block_rsv *block_rsv,
4420                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4421 {
4422         struct btrfs_space_info *space_info = block_rsv->space_info;
4423
4424         spin_lock(&block_rsv->lock);
4425         if (num_bytes == (u64)-1)
4426                 num_bytes = block_rsv->size;
4427         block_rsv->size -= num_bytes;
4428         if (block_rsv->reserved >= block_rsv->size) {
4429                 num_bytes = block_rsv->reserved - block_rsv->size;
4430                 block_rsv->reserved = block_rsv->size;
4431                 block_rsv->full = 1;
4432         } else {
4433                 num_bytes = 0;
4434         }
4435         spin_unlock(&block_rsv->lock);
4436
4437         if (num_bytes > 0) {
4438                 if (dest) {
4439                         spin_lock(&dest->lock);
4440                         if (!dest->full) {
4441                                 u64 bytes_to_add;
4442
4443                                 bytes_to_add = dest->size - dest->reserved;
4444                                 bytes_to_add = min(num_bytes, bytes_to_add);
4445                                 dest->reserved += bytes_to_add;
4446                                 if (dest->reserved >= dest->size)
4447                                         dest->full = 1;
4448                                 num_bytes -= bytes_to_add;
4449                         }
4450                         spin_unlock(&dest->lock);
4451                 }
4452                 if (num_bytes) {
4453                         spin_lock(&space_info->lock);
4454                         space_info->bytes_may_use -= num_bytes;
4455                         trace_btrfs_space_reservation(fs_info, "space_info",
4456                                         space_info->flags, num_bytes, 0);
4457                         spin_unlock(&space_info->lock);
4458                 }
4459         }
4460 }
4461
4462 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
4463                                    struct btrfs_block_rsv *dst, u64 num_bytes)
4464 {
4465         int ret;
4466
4467         ret = block_rsv_use_bytes(src, num_bytes);
4468         if (ret)
4469                 return ret;
4470
4471         block_rsv_add_bytes(dst, num_bytes, 1);
4472         return 0;
4473 }
4474
4475 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
4476 {
4477         memset(rsv, 0, sizeof(*rsv));
4478         spin_lock_init(&rsv->lock);
4479         rsv->type = type;
4480 }
4481
4482 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
4483                                               unsigned short type)
4484 {
4485         struct btrfs_block_rsv *block_rsv;
4486         struct btrfs_fs_info *fs_info = root->fs_info;
4487
4488         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
4489         if (!block_rsv)
4490                 return NULL;
4491
4492         btrfs_init_block_rsv(block_rsv, type);
4493         block_rsv->space_info = __find_space_info(fs_info,
4494                                                   BTRFS_BLOCK_GROUP_METADATA);
4495         return block_rsv;
4496 }
4497
4498 void btrfs_free_block_rsv(struct btrfs_root *root,
4499                           struct btrfs_block_rsv *rsv)
4500 {
4501         if (!rsv)
4502                 return;
4503         btrfs_block_rsv_release(root, rsv, (u64)-1);
4504         kfree(rsv);
4505 }
4506
4507 int btrfs_block_rsv_add(struct btrfs_root *root,
4508                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
4509                         enum btrfs_reserve_flush_enum flush)
4510 {
4511         int ret;
4512
4513         if (num_bytes == 0)
4514                 return 0;
4515
4516         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4517         if (!ret) {
4518                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
4519                 return 0;
4520         }
4521
4522         return ret;
4523 }
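
/*
 * Minimal usage sketch (the helpers are real, the context is invented):
 * reserve room for touching two tree items up front, then hand back
 * whatever is left when the work is done:
 *
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 2);
 *
 *	ret = btrfs_block_rsv_add(root, rsv, bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_block_rsv_release(root, rsv, (u64)-1);
 */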
4524
4525 int btrfs_block_rsv_check(struct btrfs_root *root,
4526                           struct btrfs_block_rsv *block_rsv, int min_factor)
4527 {
4528         u64 num_bytes = 0;
4529         int ret = -ENOSPC;
4530
4531         if (!block_rsv)
4532                 return 0;
4533
4534         spin_lock(&block_rsv->lock);
4535         num_bytes = div_factor(block_rsv->size, min_factor);
4536         if (block_rsv->reserved >= num_bytes)
4537                 ret = 0;
4538         spin_unlock(&block_rsv->lock);
4539
4540         return ret;
4541 }
4542
4543 int btrfs_block_rsv_refill(struct btrfs_root *root,
4544                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
4545                            enum btrfs_reserve_flush_enum flush)
4546 {
4547         u64 num_bytes = 0;
4548         int ret = -ENOSPC;
4549
4550         if (!block_rsv)
4551                 return 0;
4552
4553         spin_lock(&block_rsv->lock);
4554         num_bytes = min_reserved;
4555         if (block_rsv->reserved >= num_bytes)
4556                 ret = 0;
4557         else
4558                 num_bytes -= block_rsv->reserved;
4559         spin_unlock(&block_rsv->lock);
4560
4561         if (!ret)
4562                 return 0;
4563
4564         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4565         if (!ret) {
4566                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4567                 return 0;
4568         }
4569
4570         return ret;
4571 }
4572
4573 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4574                             struct btrfs_block_rsv *dst_rsv,
4575                             u64 num_bytes)
4576 {
4577         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4578 }
4579
4580 void btrfs_block_rsv_release(struct btrfs_root *root,
4581                              struct btrfs_block_rsv *block_rsv,
4582                              u64 num_bytes)
4583 {
4584         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4585         if (global_rsv->full || global_rsv == block_rsv ||
4586             block_rsv->space_info != global_rsv->space_info)
4587                 global_rsv = NULL;
4588         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4589                                 num_bytes);
4590 }
4591
4592 /*
4593  * Helper to calculate the size of the global block reservation.
4594  * The desired value is the sum of the space used by the extent tree,
4595  * checksum tree and root tree.
4596  */
4597 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4598 {
4599         struct btrfs_space_info *sinfo;
4600         u64 num_bytes;
4601         u64 meta_used;
4602         u64 data_used;
4603         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4604
4605         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4606         spin_lock(&sinfo->lock);
4607         data_used = sinfo->bytes_used;
4608         spin_unlock(&sinfo->lock);
4609
4610         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4611         spin_lock(&sinfo->lock);
4612         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4613                 data_used = 0;
4614         meta_used = sinfo->bytes_used;
4615         spin_unlock(&sinfo->lock);
4616
4617         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4618                     csum_size * 2;
4619         num_bytes += div64_u64(data_used + meta_used, 50);
4620
4621         if (num_bytes * 3 > meta_used)
4622                 num_bytes = div64_u64(meta_used, 3);
4623
4624         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4625 }
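
/*
 * Worked example (illustrative; assumes crc32c checksums with
 * csum_size == 4 and 4K blocks): for data_used == 100G and
 * meta_used == 1G,
 *
 *	csum part: (100G >> 12) * 4 * 2  ~= 200M
 *	2% part:   (100G + 1G) / 50      ~= 2.02G
 *	sum:                             ~= 2.2G
 *
 * Since three times that sum exceeds meta_used, the result is capped at
 * meta_used / 3 ~= 341M and then rounded up to a multiple of
 * leafsize << 10.
 */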
4626
4627 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4628 {
4629         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4630         struct btrfs_space_info *sinfo = block_rsv->space_info;
4631         u64 num_bytes;
4632
4633         num_bytes = calc_global_metadata_size(fs_info);
4634
4635         spin_lock(&sinfo->lock);
4636         spin_lock(&block_rsv->lock);
4637
4638         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
4639
4640         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4641                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4642                     sinfo->bytes_may_use;
4643
4644         if (sinfo->total_bytes > num_bytes) {
4645                 num_bytes = sinfo->total_bytes - num_bytes;
4646                 block_rsv->reserved += num_bytes;
4647                 sinfo->bytes_may_use += num_bytes;
4648                 trace_btrfs_space_reservation(fs_info, "space_info",
4649                                       sinfo->flags, num_bytes, 1);
4650         }
4651
4652         if (block_rsv->reserved >= block_rsv->size) {
4653                 num_bytes = block_rsv->reserved - block_rsv->size;
4654                 sinfo->bytes_may_use -= num_bytes;
4655                 trace_btrfs_space_reservation(fs_info, "space_info",
4656                                       sinfo->flags, num_bytes, 0);
4657                 block_rsv->reserved = block_rsv->size;
4658                 block_rsv->full = 1;
4659         }
4660
4661         spin_unlock(&block_rsv->lock);
4662         spin_unlock(&sinfo->lock);
4663 }
4664
4665 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4666 {
4667         struct btrfs_space_info *space_info;
4668
4669         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4670         fs_info->chunk_block_rsv.space_info = space_info;
4671
4672         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4673         fs_info->global_block_rsv.space_info = space_info;
4674         fs_info->delalloc_block_rsv.space_info = space_info;
4675         fs_info->trans_block_rsv.space_info = space_info;
4676         fs_info->empty_block_rsv.space_info = space_info;
4677         fs_info->delayed_block_rsv.space_info = space_info;
4678
4679         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4680         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4681         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4682         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4683         if (fs_info->quota_root)
4684                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4685         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4686
4687         update_global_block_rsv(fs_info);
4688 }
4689
4690 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4691 {
4692         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4693                                 (u64)-1);
4694         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4695         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4696         WARN_ON(fs_info->trans_block_rsv.size > 0);
4697         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4698         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4699         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4700         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4701         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4702 }
4703
4704 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4705                                   struct btrfs_root *root)
4706 {
4707         if (!trans->block_rsv)
4708                 return;
4709
4710         if (!trans->bytes_reserved)
4711                 return;
4712
4713         trace_btrfs_space_reservation(root->fs_info, "transaction",
4714                                       trans->transid, trans->bytes_reserved, 0);
4715         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4716         trans->bytes_reserved = 0;
4717 }
4718
4719 /* Can only return 0 or -ENOSPC */
4720 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4721                                   struct inode *inode)
4722 {
4723         struct btrfs_root *root = BTRFS_I(inode)->root;
4724         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4725         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4726
4727         /*
4728          * We need to hold space in order to delete our orphan item once we've
4729          * added it, so this takes the reservation now so that we can release
4730          * it later, when we are truly done with the orphan item.
4731          */
4732         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4733         trace_btrfs_space_reservation(root->fs_info, "orphan",
4734                                       btrfs_ino(inode), num_bytes, 1);
4735         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4736 }
4737
4738 void btrfs_orphan_release_metadata(struct inode *inode)
4739 {
4740         struct btrfs_root *root = BTRFS_I(inode)->root;
4741         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4742         trace_btrfs_space_reservation(root->fs_info, "orphan",
4743                                       btrfs_ino(inode), num_bytes, 0);
4744         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4745 }
4746
4747 /*
4748  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
4749  * root: the root of the parent directory
4750  * rsv: block reservation
4751  * items: the number of items that we need to reserve space for
4752  * qgroup_reserved: used to return the reserved size in qgroup
4753  *
4754  * This function is used to reserve the space for snapshot/subvolume
4755  * creation and deletion. Those operations differ from the common
4756  * file/directory operations: they change two fs/file trees and the
4757  * root tree, and the number of items that the qgroup reserves is
4758  * different from the free space reservation. So we cannot use
4759  * the space reservation mechanism in start_transaction().
4760  */
4761 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
4762                                      struct btrfs_block_rsv *rsv,
4763                                      int items,
4764                                      u64 *qgroup_reserved,
4765                                      bool use_global_rsv)
4766 {
4767         u64 num_bytes;
4768         int ret;
4769         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4770
4771         if (root->fs_info->quota_enabled) {
4772                 /* One for parent inode, two for dir entries */
4773                 num_bytes = 3 * root->leafsize;
4774                 ret = btrfs_qgroup_reserve(root, num_bytes);
4775                 if (ret)
4776                         return ret;
4777         } else {
4778                 num_bytes = 0;
4779         }
4780
4781         *qgroup_reserved = num_bytes;
4782
4783         num_bytes = btrfs_calc_trans_metadata_size(root, items);
4784         rsv->space_info = __find_space_info(root->fs_info,
4785                                             BTRFS_BLOCK_GROUP_METADATA);
4786         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
4787                                   BTRFS_RESERVE_FLUSH_ALL);
4788
4789         if (ret == -ENOSPC && use_global_rsv)
4790                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
4791
4792         if (ret) {
4793                 if (*qgroup_reserved)
4794                         btrfs_qgroup_free(root, *qgroup_reserved);
4795         }
4796
4797         return ret;
4798 }
4799
4800 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
4801                                       struct btrfs_block_rsv *rsv,
4802                                       u64 qgroup_reserved)
4803 {
4804         btrfs_block_rsv_release(root, rsv, (u64)-1);
4805         if (qgroup_reserved)
4806                 btrfs_qgroup_free(root, qgroup_reserved);
4807 }
4808
4809 /**
4810  * drop_outstanding_extent - drop an outstanding extent
4811  * @inode: the inode we're dropping the extent for
4812  *
4813  * This is called when we are freeing up an outstanding extent, either
4814  * after an error or after an extent is written.  This will return the number of
4815  * reserved extents that need to be freed.  This must be called with
4816  * BTRFS_I(inode)->lock held.
4817  */
4818 static unsigned drop_outstanding_extent(struct inode *inode)
4819 {
4820         unsigned drop_inode_space = 0;
4821         unsigned dropped_extents = 0;
4822
4823         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4824         BTRFS_I(inode)->outstanding_extents--;
4825
4826         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4827             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4828                                &BTRFS_I(inode)->runtime_flags))
4829                 drop_inode_space = 1;
4830
4831         /*
4832          * If we have at least as many outstanding extents as reserved
4833          * extents then we need to leave the reserved extents count alone.
4834          */
4835         if (BTRFS_I(inode)->outstanding_extents >=
4836             BTRFS_I(inode)->reserved_extents)
4837                 return drop_inode_space;
4838
4839         dropped_extents = BTRFS_I(inode)->reserved_extents -
4840                 BTRFS_I(inode)->outstanding_extents;
4841         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4842         return dropped_extents + drop_inode_space;
4843 }
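
/*
 * Example walk-through (numbers invented): an inode with
 * outstanding_extents == 3 and reserved_extents == 5 finishes one
 * extent.  outstanding_extents drops to 2, which is below the 5 we
 * reserved, so dropped_extents = 5 - 2 = 3, reserved_extents becomes 2
 * and the caller gets 3 extents' worth of metadata to free.  The extra
 * drop_inode_space extent is only returned when outstanding_extents
 * hits zero and the inode-update reservation is released as well.
 */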
4844
4845 /**
4846  * calc_csum_metadata_size - return the amount of metadata space that must be
4847  *      reserved/freed for the given bytes.
4848  * @inode: the inode we're manipulating
4849  * @num_bytes: the number of bytes in question
4850  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4851  *
4852  * This adjusts the number of csum_bytes in the inode and then returns the
4853  * correct amount of metadata that must either be reserved or freed.  We
4854  * calculate how many checksums we can fit into one leaf and then divide the
4855  * number of bytes that will need to be checksummed by this value to figure out
4856  * how many checksums will be required.  If we are adding bytes then the number
4857  * may go up and we will return the number of additional bytes that must be
4858  * reserved.  If it is going down we will return the number of bytes that must
4859  * be freed.
4860  *
4861  * This must be called with BTRFS_I(inode)->lock held.
4862  */
4863 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4864                                    int reserve)
4865 {
4866         struct btrfs_root *root = BTRFS_I(inode)->root;
4867         u64 csum_size;
4868         int num_csums_per_leaf;
4869         int num_csums;
4870         int old_csums;
4871
4872         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4873             BTRFS_I(inode)->csum_bytes == 0)
4874                 return 0;
4875
4876         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4877         if (reserve)
4878                 BTRFS_I(inode)->csum_bytes += num_bytes;
4879         else
4880                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4881         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4882         num_csums_per_leaf = (int)div64_u64(csum_size,
4883                                             sizeof(struct btrfs_csum_item) +
4884                                             sizeof(struct btrfs_disk_key));
4885         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4886         num_csums = num_csums + num_csums_per_leaf - 1;
4887         num_csums = num_csums / num_csums_per_leaf;
4888
4889         old_csums = old_csums + num_csums_per_leaf - 1;
4890         old_csums = old_csums / num_csums_per_leaf;
4891
4892         /* No change, no need to reserve more */
4893         if (old_csums == num_csums)
4894                 return 0;
4895
4896         if (reserve)
4897                 return btrfs_calc_trans_metadata_size(root,
4898                                                       num_csums - old_csums);
4899
4900         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4901 }
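
/*
 * Rough example (all numbers hypothetical): with 4K sectors, suppose the
 * leaf-space computation above yields num_csums_per_leaf == 300.
 * Growing csum_bytes from 0 to 4M means 1024 sectors to checksum, which
 * rounds up to 4 leaves; old_csums was 0 leaves, so we return the
 * metadata size for 4 - 0 = 4 items.  Shrinking csum_bytes runs the
 * same computation in reverse and returns bytes that can be freed.
 */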
4902
4903 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4904 {
4905         struct btrfs_root *root = BTRFS_I(inode)->root;
4906         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4907         u64 to_reserve = 0;
4908         u64 csum_bytes;
4909         unsigned nr_extents = 0;
4910         int extra_reserve = 0;
4911         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4912         int ret = 0;
4913         bool delalloc_lock = true;
4914         u64 to_free = 0;
4915         unsigned dropped;
4916
4917         /* If we are a free space inode we must not flush, since we will be in
4918          * the middle of a transaction commit.  We also don't need the delalloc
4919          * mutex since we won't race with anybody.  We need this mostly to make
4920          * lockdep shut its filthy mouth.
4921          */
4922         if (btrfs_is_free_space_inode(inode)) {
4923                 flush = BTRFS_RESERVE_NO_FLUSH;
4924                 delalloc_lock = false;
4925         }
4926
4927         if (flush != BTRFS_RESERVE_NO_FLUSH &&
4928             btrfs_transaction_in_commit(root->fs_info))
4929                 schedule_timeout(1);
4930
4931         if (delalloc_lock)
4932                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4933
4934         num_bytes = ALIGN(num_bytes, root->sectorsize);
4935
4936         spin_lock(&BTRFS_I(inode)->lock);
4937         BTRFS_I(inode)->outstanding_extents++;
4938
4939         if (BTRFS_I(inode)->outstanding_extents >
4940             BTRFS_I(inode)->reserved_extents)
4941                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4942                         BTRFS_I(inode)->reserved_extents;
4943
4944         /*
4945          * Add an item to reserve for updating the inode when we complete the
4946          * delalloc io.
4947          */
4948         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4949                       &BTRFS_I(inode)->runtime_flags)) {
4950                 nr_extents++;
4951                 extra_reserve = 1;
4952         }
4953
4954         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4955         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4956         csum_bytes = BTRFS_I(inode)->csum_bytes;
4957         spin_unlock(&BTRFS_I(inode)->lock);
4958
4959         if (root->fs_info->quota_enabled) {
4960                 ret = btrfs_qgroup_reserve(root, num_bytes +
4961                                            nr_extents * root->leafsize);
4962                 if (ret)
4963                         goto out_fail;
4964         }
4965
4966         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4967         if (unlikely(ret)) {
4968                 if (root->fs_info->quota_enabled)
4969                         btrfs_qgroup_free(root, num_bytes +
4970                                                 nr_extents * root->leafsize);
4971                 goto out_fail;
4972         }
4973
4974         spin_lock(&BTRFS_I(inode)->lock);
4975         if (extra_reserve) {
4976                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4977                         &BTRFS_I(inode)->runtime_flags);
4978                 nr_extents--;
4979         }
4980         BTRFS_I(inode)->reserved_extents += nr_extents;
4981         spin_unlock(&BTRFS_I(inode)->lock);
4982
4983         if (delalloc_lock)
4984                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4985
4986         if (to_reserve)
4987                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4988                                               btrfs_ino(inode), to_reserve, 1);
4989         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4990
4991         return 0;
4992
4993 out_fail:
4994         spin_lock(&BTRFS_I(inode)->lock);
4995         dropped = drop_outstanding_extent(inode);
4996         /*
4997          * If the inode's csum_bytes is the same as the original
4998          * csum_bytes then we know we haven't raced with any free()ers
4999          * so we can just reduce our inode's csum bytes and carry on.
5000          */
5001         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5002                 calc_csum_metadata_size(inode, num_bytes, 0);
5003         } else {
5004                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5005                 u64 bytes;
5006
5007                 /*
5008                  * This is tricky, but first we need to figure out how much was
5009                  * freed by any free-ers that occurred during this
5010                  * reservation, so we reset ->csum_bytes to the csum_bytes
5011                  * before we dropped our lock, and then call the free for the
5012                  * number of bytes that were freed while we were trying our
5013                  * reservation.
5014                  */
5015                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5016                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5017                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5018
5020                 /*
5021                  * Now we need to see how much we would have freed had we not
5022                  * been making this reservation and our ->csum_bytes were not
5023                  * artificially inflated.
5024                  */
5025                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5026                 bytes = csum_bytes - orig_csum_bytes;
5027                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5028
5029                 /*
5030                  * Now reset ->csum_bytes to what it should be.  If bytes is
5031                  * more than to_free then we would have free'd more space had we
5032                  * not had an artificially high ->csum_bytes, so we need to free
5033                  * the remainder.  If bytes is the same or less then we don't
5034                  * need to do anything, the other free-ers did the correct
5035                  * thing.
5036                  */
5037                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5038                 if (bytes > to_free)
5039                         to_free = bytes - to_free;
5040                 else
5041                         to_free = 0;
5042         }
5043         spin_unlock(&BTRFS_I(inode)->lock);
5044         if (dropped)
5045                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5046
5047         if (to_free) {
5048                 btrfs_block_rsv_release(root, block_rsv, to_free);
5049                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5050                                               btrfs_ino(inode), to_free, 0);
5051         }
5052         if (delalloc_lock)
5053                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5054         return ret;
5055 }
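
/*
 * The out_fail csum accounting above is subtle; a numeric sketch with
 * invented values may help.  Say csum_bytes was saved as 1M (after our
 * 512K was added) and a racing free shrank ->csum_bytes to 768K before
 * we retook the lock.  We first reset ->csum_bytes to 1M and free the
 * racer's 256K against that inflated base, giving to_free; we then free
 * the same 256K against a base that never included our 512K, giving
 * bytes.  If bytes > to_free, the inflated base made the racer free too
 * little, so the shortfall (bytes - to_free) becomes the amount we
 * release ourselves.
 */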
5056
5057 /**
5058  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5059  * @inode: the inode to release the reservation for
5060  * @num_bytes: the number of bytes we're releasing
5061  *
5062  * This will release the metadata reservation for an inode.  This can be called
5063  * once we complete IO for a given set of bytes to release their metadata
5064  * reservations.
5065  */
5066 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5067 {
5068         struct btrfs_root *root = BTRFS_I(inode)->root;
5069         u64 to_free = 0;
5070         unsigned dropped;
5071
5072         num_bytes = ALIGN(num_bytes, root->sectorsize);
5073         spin_lock(&BTRFS_I(inode)->lock);
5074         dropped = drop_outstanding_extent(inode);
5075
5076         if (num_bytes)
5077                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5078         spin_unlock(&BTRFS_I(inode)->lock);
5079         if (dropped > 0)
5080                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5081
5082         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5083                                       btrfs_ino(inode), to_free, 0);
5084         if (root->fs_info->quota_enabled) {
5085                 btrfs_qgroup_free(root, num_bytes +
5086                                         dropped * root->leafsize);
5087         }
5088
5089         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5090                                 to_free);
5091 }
5092
5093 /**
5094  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5095  * @inode: inode we're writing to
5096  * @num_bytes: the number of bytes we want to allocate
5097  *
5098  * This will do the following things
5099  *
5100  * o reserve space in the data space info for num_bytes
5101  * o reserve space in the metadata space info based on number of outstanding
5102  *   extents and how much csums will be needed
5103  * o add to the inode's ->delalloc_bytes
5104  * o add it to the fs_info's delalloc inodes list.
5105  *
5106  * This will return 0 for success and -ENOSPC if there is no space left.
5107  */
5108 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5109 {
5110         int ret;
5111
5112         ret = btrfs_check_data_free_space(inode, num_bytes);
5113         if (ret)
5114                 return ret;
5115
5116         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5117         if (ret) {
5118                 btrfs_free_reserved_data_space(inode, num_bytes);
5119                 return ret;
5120         }
5121
5122         return 0;
5123 }
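
/*
 * Minimal sketch of the expected pairing in a write path (context
 * invented, helper names real):
 *
 *	ret = btrfs_delalloc_reserve_space(inode,
 *					   num_pages << PAGE_CACHE_SHIFT);
 *	if (ret)
 *		return ret;
 *	...dirty the pages...
 *	if (write_failed)
 *		btrfs_delalloc_release_space(inode,
 *					     num_pages << PAGE_CACHE_SHIFT);
 */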
5124
5125 /**
5126  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5127  * @inode: inode we're releasing space for
5128  * @num_bytes: the number of bytes we want to free up
5129  *
5130  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5131  * called in the case that we don't need the metadata AND data reservations
5132  * anymore, e.g. if there is an error or we insert an inline extent.
5133  *
5134  * This function will release the metadata space that was not used and will
5135  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5136  * list if there are no delalloc bytes left.
5137  */
5138 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5139 {
5140         btrfs_delalloc_release_metadata(inode, num_bytes);
5141         btrfs_free_reserved_data_space(inode, num_bytes);
5142 }
5143
5144 static int update_block_group(struct btrfs_root *root,
5145                               u64 bytenr, u64 num_bytes, int alloc)
5146 {
5147         struct btrfs_block_group_cache *cache = NULL;
5148         struct btrfs_fs_info *info = root->fs_info;
5149         u64 total = num_bytes;
5150         u64 old_val;
5151         u64 byte_in_group;
5152         int factor;
5153
5154         /* block accounting for super block */
5155         spin_lock(&info->delalloc_root_lock);
5156         old_val = btrfs_super_bytes_used(info->super_copy);
5157         if (alloc)
5158                 old_val += num_bytes;
5159         else
5160                 old_val -= num_bytes;
5161         btrfs_set_super_bytes_used(info->super_copy, old_val);
5162         spin_unlock(&info->delalloc_root_lock);
5163
5164         while (total) {
5165                 cache = btrfs_lookup_block_group(info, bytenr);
5166                 if (!cache)
5167                         return -ENOENT;
5168                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5169                                     BTRFS_BLOCK_GROUP_RAID1 |
5170                                     BTRFS_BLOCK_GROUP_RAID10))
5171                         factor = 2;
5172                 else
5173                         factor = 1;
5174                 /*
5175                  * If this block group has free space cache written out, we
5176                  * need to make sure to load it if we are removing space.  This
5177                  * is because we need the unpinning stage to actually add the
5178                  * space back to the block group, otherwise we will leak space.
5179                  */
5180                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5181                         cache_block_group(cache, 1);
5182
5183                 byte_in_group = bytenr - cache->key.objectid;
5184                 WARN_ON(byte_in_group > cache->key.offset);
5185
5186                 spin_lock(&cache->space_info->lock);
5187                 spin_lock(&cache->lock);
5188
5189                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5190                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5191                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5192
5193                 cache->dirty = 1;
5194                 old_val = btrfs_block_group_used(&cache->item);
5195                 num_bytes = min(total, cache->key.offset - byte_in_group);
5196                 if (alloc) {
5197                         old_val += num_bytes;
5198                         btrfs_set_block_group_used(&cache->item, old_val);
5199                         cache->reserved -= num_bytes;
5200                         cache->space_info->bytes_reserved -= num_bytes;
5201                         cache->space_info->bytes_used += num_bytes;
5202                         cache->space_info->disk_used += num_bytes * factor;
5203                         spin_unlock(&cache->lock);
5204                         spin_unlock(&cache->space_info->lock);
5205                 } else {
5206                         old_val -= num_bytes;
5207                         btrfs_set_block_group_used(&cache->item, old_val);
5208                         cache->pinned += num_bytes;
5209                         cache->space_info->bytes_pinned += num_bytes;
5210                         cache->space_info->bytes_used -= num_bytes;
5211                         cache->space_info->disk_used -= num_bytes * factor;
5212                         spin_unlock(&cache->lock);
5213                         spin_unlock(&cache->space_info->lock);
5214
5215                         set_extent_dirty(info->pinned_extents,
5216                                          bytenr, bytenr + num_bytes - 1,
5217                                          GFP_NOFS | __GFP_NOFAIL);
5218                 }
5219                 btrfs_put_block_group(cache);
5220                 total -= num_bytes;
5221                 bytenr += num_bytes;
5222         }
5223         return 0;
5224 }
5225
5226 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5227 {
5228         struct btrfs_block_group_cache *cache;
5229         u64 bytenr;
5230
5231         spin_lock(&root->fs_info->block_group_cache_lock);
5232         bytenr = root->fs_info->first_logical_byte;
5233         spin_unlock(&root->fs_info->block_group_cache_lock);
5234
5235         if (bytenr < (u64)-1)
5236                 return bytenr;
5237
5238         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5239         if (!cache)
5240                 return 0;
5241
5242         bytenr = cache->key.objectid;
5243         btrfs_put_block_group(cache);
5244
5245         return bytenr;
5246 }
5247
5248 static int pin_down_extent(struct btrfs_root *root,
5249                            struct btrfs_block_group_cache *cache,
5250                            u64 bytenr, u64 num_bytes, int reserved)
5251 {
5252         spin_lock(&cache->space_info->lock);
5253         spin_lock(&cache->lock);
5254         cache->pinned += num_bytes;
5255         cache->space_info->bytes_pinned += num_bytes;
5256         if (reserved) {
5257                 cache->reserved -= num_bytes;
5258                 cache->space_info->bytes_reserved -= num_bytes;
5259         }
5260         spin_unlock(&cache->lock);
5261         spin_unlock(&cache->space_info->lock);
5262
5263         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5264                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5265         return 0;
5266 }
5267
5268 /*
5269  * this function must be called within a transaction
5270  */
5271 int btrfs_pin_extent(struct btrfs_root *root,
5272                      u64 bytenr, u64 num_bytes, int reserved)
5273 {
5274         struct btrfs_block_group_cache *cache;
5275
5276         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5277         BUG_ON(!cache); /* Logic error */
5278
5279         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5280
5281         btrfs_put_block_group(cache);
5282         return 0;
5283 }
5284
5285 /*
5286  * this function must be called within a transaction
5287  */
5288 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5289                                     u64 bytenr, u64 num_bytes)
5290 {
5291         struct btrfs_block_group_cache *cache;
5292         int ret;
5293
5294         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5295         if (!cache)
5296                 return -EINVAL;
5297
5298         /*
5299          * pull in the free space cache (if any) so that our pin
5300          * removes the free space from the cache.  We have load_only set
5301          * to one because the slow code to read in the free extents does check
5302          * the pinned extents.
5303          */
5304         cache_block_group(cache, 1);
5305
5306         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5307
5308         /* remove us from the free space cache (if we're there at all) */
5309         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5310         btrfs_put_block_group(cache);
5311         return ret;
5312 }
5313
5314 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5315 {
5316         int ret;
5317         struct btrfs_block_group_cache *block_group;
5318         struct btrfs_caching_control *caching_ctl;
5319
5320         block_group = btrfs_lookup_block_group(root->fs_info, start);
5321         if (!block_group)
5322                 return -EINVAL;
5323
5324         cache_block_group(block_group, 0);
5325         caching_ctl = get_caching_control(block_group);
5326
5327         if (!caching_ctl) {
5328                 /* Logic error */
5329                 BUG_ON(!block_group_cache_done(block_group));
5330                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5331         } else {
5332                 mutex_lock(&caching_ctl->mutex);
5333
5334                 if (start >= caching_ctl->progress) {
5335                         ret = add_excluded_extent(root, start, num_bytes);
5336                 } else if (start + num_bytes <= caching_ctl->progress) {
5337                         ret = btrfs_remove_free_space(block_group,
5338                                                       start, num_bytes);
5339                 } else {
5340                         num_bytes = caching_ctl->progress - start;
5341                         ret = btrfs_remove_free_space(block_group,
5342                                                       start, num_bytes);
5343                         if (ret)
5344                                 goto out_lock;
5345
5346                         num_bytes = (start + num_bytes) -
5347                                 caching_ctl->progress;
5348                         start = caching_ctl->progress;
5349                         ret = add_excluded_extent(root, start, num_bytes);
5350                 }
5351 out_lock:
5352                 mutex_unlock(&caching_ctl->mutex);
5353                 put_caching_control(caching_ctl);
5354         }
5355         btrfs_put_block_group(block_group);
5356         return ret;
5357 }
5358
5359 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5360                                  struct extent_buffer *eb)
5361 {
5362         struct btrfs_file_extent_item *item;
5363         struct btrfs_key key;
5364         int found_type;
5365         int i;
5366
5367         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5368                 return 0;
5369
5370         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5371                 btrfs_item_key_to_cpu(eb, &key, i);
5372                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5373                         continue;
5374                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5375                 found_type = btrfs_file_extent_type(eb, item);
5376                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5377                         continue;
5378                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5379                         continue;
5380                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5381                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5382                 __exclude_logged_extent(log, key.objectid, key.offset);
5383         }
5384
5385         return 0;
5386 }
5387
5388 /**
5389  * btrfs_update_reserved_bytes - update the block_group and space info counters
5390  * @cache:      The cache we are manipulating
5391  * @num_bytes:  The number of bytes in question
5392  * @reserve:    One of the reservation enums
5393  *
5394  * This is called by the allocator when it reserves space, or by somebody who is
5395  * freeing space that was never actually used on disk.  For example if you
5396  * reserve some space for a new leaf in transaction A and before transaction A
5397  * commits you free that leaf, you call this with reserve set to 0 in order to
5398  * clear the reservation.
5399  *
5400  * Metadata reservations should use RESERVE_ALLOC so we do the proper
5401  * ENOSPC accounting.  For data we handle the reservation through clearing the
5402  * delalloc bits in the io_tree.  We have to do this since we could end up
5403  * allocating less disk space for the amount of data we have reserved in the
5404  * case of compression.
5405  *
5406  * If this is a reservation and the block group has become read only we cannot
5407  * make the reservation and return -EAGAIN, otherwise this function always
5408  * succeeds.
5409  */
5410 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5411                                        u64 num_bytes, int reserve)
5412 {
5413         struct btrfs_space_info *space_info = cache->space_info;
5414         int ret = 0;
5415
5416         spin_lock(&space_info->lock);
5417         spin_lock(&cache->lock);
5418         if (reserve != RESERVE_FREE) {
5419                 if (cache->ro) {
5420                         ret = -EAGAIN;
5421                 } else {
5422                         cache->reserved += num_bytes;
5423                         space_info->bytes_reserved += num_bytes;
5424                         if (reserve == RESERVE_ALLOC) {
5425                                 trace_btrfs_space_reservation(cache->fs_info,
5426                                                 "space_info", space_info->flags,
5427                                                 num_bytes, 0);
5428                                 space_info->bytes_may_use -= num_bytes;
5429                         }
5430                 }
5431         } else {
5432                 if (cache->ro)
5433                         space_info->bytes_readonly += num_bytes;
5434                 cache->reserved -= num_bytes;
5435                 space_info->bytes_reserved -= num_bytes;
5436         }
5437         spin_unlock(&cache->lock);
5438         spin_unlock(&space_info->lock);
5439         return ret;
5440 }
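/*
 * Example pairing, as used by find_free_extent() and
 * __btrfs_free_reserved_extent() below, roughly:
 *
 *	ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 *
 * Data allocations pass RESERVE_ALLOC_NO_ACCOUNT instead, since their
 * ENOSPC accounting is done via the delalloc bits in the io_tree.
 */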
5441
5442 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5443                                 struct btrfs_root *root)
5444 {
5445         struct btrfs_fs_info *fs_info = root->fs_info;
5446         struct btrfs_caching_control *next;
5447         struct btrfs_caching_control *caching_ctl;
5448         struct btrfs_block_group_cache *cache;
5449         struct btrfs_space_info *space_info;
5450
5451         down_write(&fs_info->extent_commit_sem);
5452
5453         list_for_each_entry_safe(caching_ctl, next,
5454                                  &fs_info->caching_block_groups, list) {
5455                 cache = caching_ctl->block_group;
5456                 if (block_group_cache_done(cache)) {
5457                         cache->last_byte_to_unpin = (u64)-1;
5458                         list_del_init(&caching_ctl->list);
5459                         put_caching_control(caching_ctl);
5460                 } else {
5461                         cache->last_byte_to_unpin = caching_ctl->progress;
5462                 }
5463         }
5464
5465         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5466                 fs_info->pinned_extents = &fs_info->freed_extents[1];
5467         else
5468                 fs_info->pinned_extents = &fs_info->freed_extents[0];
5469
5470         up_write(&fs_info->extent_commit_sem);
5471
5472         list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
5473                 percpu_counter_set(&space_info->total_bytes_pinned, 0);
5474
5475         update_global_block_rsv(fs_info);
5476 }
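/*
 * Note on the freed_extents[] flip above: pinned_extents alternates
 * between the two trees at each commit, so extents pinned while this
 * commit is running land in the tree that will be unpinned by the
 * *next* btrfs_finish_extent_commit(), never the one currently being
 * drained.
 */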
5477
5478 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5479 {
5480         struct btrfs_fs_info *fs_info = root->fs_info;
5481         struct btrfs_block_group_cache *cache = NULL;
5482         struct btrfs_space_info *space_info;
5483         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5484         u64 len;
5485         bool readonly;
5486
5487         while (start <= end) {
5488                 readonly = false;
5489                 if (!cache ||
5490                     start >= cache->key.objectid + cache->key.offset) {
5491                         if (cache)
5492                                 btrfs_put_block_group(cache);
5493                         cache = btrfs_lookup_block_group(fs_info, start);
5494                         BUG_ON(!cache); /* Logic error */
5495                 }
5496
5497                 len = cache->key.objectid + cache->key.offset - start;
5498                 len = min(len, end + 1 - start);
5499
5500                 if (start < cache->last_byte_to_unpin) {
5501                         len = min(len, cache->last_byte_to_unpin - start);
5502                         btrfs_add_free_space(cache, start, len);
5503                 }
5504
5505                 start += len;
5506                 space_info = cache->space_info;
5507
5508                 spin_lock(&space_info->lock);
5509                 spin_lock(&cache->lock);
5510                 cache->pinned -= len;
5511                 space_info->bytes_pinned -= len;
5512                 if (cache->ro) {
5513                         space_info->bytes_readonly += len;
5514                         readonly = true;
5515                 }
5516                 spin_unlock(&cache->lock);
5517                 if (!readonly && global_rsv->space_info == space_info) {
5518                         spin_lock(&global_rsv->lock);
5519                         if (!global_rsv->full) {
5520                                 len = min(len, global_rsv->size -
5521                                           global_rsv->reserved);
5522                                 global_rsv->reserved += len;
5523                                 space_info->bytes_may_use += len;
5524                                 if (global_rsv->reserved >= global_rsv->size)
5525                                         global_rsv->full = 1;
5526                         }
5527                         spin_unlock(&global_rsv->lock);
5528                 }
5529                 spin_unlock(&space_info->lock);
5530         }
5531
5532         if (cache)
5533                 btrfs_put_block_group(cache);
5534         return 0;
5535 }
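/*
 * Note: the global block rsv refill above is a heuristic.  Space being
 * unpinned in the global rsv's space_info is steered straight back into
 * the reservation (bumping bytes_may_use) until global_rsv->size is
 * met, rather than being returned to the general pool.
 */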
5536
5537 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5538                                struct btrfs_root *root)
5539 {
5540         struct btrfs_fs_info *fs_info = root->fs_info;
5541         struct extent_io_tree *unpin;
5542         u64 start;
5543         u64 end;
5544         int ret;
5545
5546         if (trans->aborted)
5547                 return 0;
5548
5549         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
5550                 unpin = &fs_info->freed_extents[1];
5551         else
5552                 unpin = &fs_info->freed_extents[0];
5553
5554         while (1) {
5555                 ret = find_first_extent_bit(unpin, 0, &start, &end,
5556                                             EXTENT_DIRTY, NULL);
5557                 if (ret)
5558                         break;
5559
5560                 if (btrfs_test_opt(root, DISCARD))
5561                         ret = btrfs_discard_extent(root, start,
5562                                                    end + 1 - start, NULL);
5563
5564                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5565                 unpin_extent_range(root, start, end);
5566                 cond_resched();
5567         }
5568
5569         return 0;
5570 }
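/*
 * Note: this runs at the tail end of transaction commit, once the new
 * roots are safely on disk; only then can the extents pinned during the
 * transaction be returned to the free space cache (and discarded, if
 * the DISCARD mount option is set).
 */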
5571
5572 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
5573                              u64 owner, u64 root_objectid)
5574 {
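	/*
	 * Note: callers may pass a negative value cast to u64 (see the
	 * -num_bytes call in __btrfs_free_extent() below) to subtract from
	 * the counter; percpu_counter_add() takes an s64, so the wraparound
	 * does the right thing.
	 */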
5575         struct btrfs_space_info *space_info;
5576         u64 flags;
5577
5578         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5579                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
5580                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
5581                 else
5582                         flags = BTRFS_BLOCK_GROUP_METADATA;
5583         } else {
5584                 flags = BTRFS_BLOCK_GROUP_DATA;
5585         }
5586
5587         space_info = __find_space_info(fs_info, flags);
5588         BUG_ON(!space_info); /* Logic bug */
5589         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
5590 }
5591
5592
5593 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5594                                 struct btrfs_root *root,
5595                                 u64 bytenr, u64 num_bytes, u64 parent,
5596                                 u64 root_objectid, u64 owner_objectid,
5597                                 u64 owner_offset, int refs_to_drop,
5598                                 struct btrfs_delayed_extent_op *extent_op)
5599 {
5600         struct btrfs_key key;
5601         struct btrfs_path *path;
5602         struct btrfs_fs_info *info = root->fs_info;
5603         struct btrfs_root *extent_root = info->extent_root;
5604         struct extent_buffer *leaf;
5605         struct btrfs_extent_item *ei;
5606         struct btrfs_extent_inline_ref *iref;
5607         int ret;
5608         int is_data;
5609         int extent_slot = 0;
5610         int found_extent = 0;
5611         int num_to_del = 1;
5612         u32 item_size;
5613         u64 refs;
5614         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
5615                                                  SKINNY_METADATA);
5616
5617         path = btrfs_alloc_path();
5618         if (!path)
5619                 return -ENOMEM;
5620
5621         path->reada = 1;
5622         path->leave_spinning = 1;
5623
5624         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
5625         BUG_ON(!is_data && refs_to_drop != 1);
5626
5627         if (is_data)
5628                 skinny_metadata = 0;
5629
5630         ret = lookup_extent_backref(trans, extent_root, path, &iref,
5631                                     bytenr, num_bytes, parent,
5632                                     root_objectid, owner_objectid,
5633                                     owner_offset);
5634         if (ret == 0) {
5635                 extent_slot = path->slots[0];
5636                 while (extent_slot >= 0) {
5637                         btrfs_item_key_to_cpu(path->nodes[0], &key,
5638                                               extent_slot);
5639                         if (key.objectid != bytenr)
5640                                 break;
5641                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5642                             key.offset == num_bytes) {
5643                                 found_extent = 1;
5644                                 break;
5645                         }
5646                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
5647                             key.offset == owner_objectid) {
5648                                 found_extent = 1;
5649                                 break;
5650                         }
5651                         if (path->slots[0] - extent_slot > 5)
5652                                 break;
5653                         extent_slot--;
5654                 }
5655 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5656                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
5657                 if (found_extent && item_size < sizeof(*ei))
5658                         found_extent = 0;
5659 #endif
5660                 if (!found_extent) {
5661                         BUG_ON(iref);
5662                         ret = remove_extent_backref(trans, extent_root, path,
5663                                                     NULL, refs_to_drop,
5664                                                     is_data);
5665                         if (ret) {
5666                                 btrfs_abort_transaction(trans, extent_root, ret);
5667                                 goto out;
5668                         }
5669                         btrfs_release_path(path);
5670                         path->leave_spinning = 1;
5671
5672                         key.objectid = bytenr;
5673                         key.type = BTRFS_EXTENT_ITEM_KEY;
5674                         key.offset = num_bytes;
5675
5676                         if (!is_data && skinny_metadata) {
5677                                 key.type = BTRFS_METADATA_ITEM_KEY;
5678                                 key.offset = owner_objectid;
5679                         }
5680
5681                         ret = btrfs_search_slot(trans, extent_root,
5682                                                 &key, path, -1, 1);
5683                         if (ret > 0 && skinny_metadata && path->slots[0]) {
5684                                 /*
5685                                  * Couldn't find our skinny metadata item,
5686                                  * see if we have ye olde extent item.
5687                                  */
5688                                 path->slots[0]--;
5689                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
5690                                                       path->slots[0]);
5691                                 if (key.objectid == bytenr &&
5692                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
5693                                     key.offset == num_bytes)
5694                                         ret = 0;
5695                         }
5696
5697                         if (ret > 0 && skinny_metadata) {
5698                                 skinny_metadata = false;
5699                                 key.type = BTRFS_EXTENT_ITEM_KEY;
5700                                 key.offset = num_bytes;
5701                                 btrfs_release_path(path);
5702                                 ret = btrfs_search_slot(trans, extent_root,
5703                                                         &key, path, -1, 1);
5704                         }
5705
5706                         if (ret) {
5707                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5708                                         ret, bytenr);
5709                                 if (ret > 0)
5710                                         btrfs_print_leaf(extent_root,
5711                                                          path->nodes[0]);
5712                         }
5713                         if (ret < 0) {
5714                                 btrfs_abort_transaction(trans, extent_root, ret);
5715                                 goto out;
5716                         }
5717                         extent_slot = path->slots[0];
5718                 }
5719         } else if (ret == -ENOENT) {
5720                 btrfs_print_leaf(extent_root, path->nodes[0]);
5721                 WARN_ON(1);
5722                 btrfs_err(info,
5723                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5724                         bytenr, parent, root_objectid, owner_objectid,
5725                         owner_offset);
5726         } else {
5727                 btrfs_abort_transaction(trans, extent_root, ret);
5728                 goto out;
5729         }
5730
5731         leaf = path->nodes[0];
5732         item_size = btrfs_item_size_nr(leaf, extent_slot);
5733 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
5734         if (item_size < sizeof(*ei)) {
5735                 BUG_ON(found_extent || extent_slot != path->slots[0]);
5736                 ret = convert_extent_item_v0(trans, extent_root, path,
5737                                              owner_objectid, 0);
5738                 if (ret < 0) {
5739                         btrfs_abort_transaction(trans, extent_root, ret);
5740                         goto out;
5741                 }
5742
5743                 btrfs_release_path(path);
5744                 path->leave_spinning = 1;
5745
5746                 key.objectid = bytenr;
5747                 key.type = BTRFS_EXTENT_ITEM_KEY;
5748                 key.offset = num_bytes;
5749
5750                 ret = btrfs_search_slot(trans, extent_root, &key, path,
5751                                         -1, 1);
5752                 if (ret) {
5753                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5754                                 ret, bytenr);
5755                         btrfs_print_leaf(extent_root, path->nodes[0]);
5756                 }
5757                 if (ret < 0) {
5758                         btrfs_abort_transaction(trans, extent_root, ret);
5759                         goto out;
5760                 }
5761
5762                 extent_slot = path->slots[0];
5763                 leaf = path->nodes[0];
5764                 item_size = btrfs_item_size_nr(leaf, extent_slot);
5765         }
5766 #endif
5767         BUG_ON(item_size < sizeof(*ei));
5768         ei = btrfs_item_ptr(leaf, extent_slot,
5769                             struct btrfs_extent_item);
5770         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
5771             key.type == BTRFS_EXTENT_ITEM_KEY) {
5772                 struct btrfs_tree_block_info *bi;
5773                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
5774                 bi = (struct btrfs_tree_block_info *)(ei + 1);
5775                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
5776         }
5777
5778         refs = btrfs_extent_refs(leaf, ei);
5779         if (refs < refs_to_drop) {
5780                 btrfs_err(info, "trying to drop %d refs but we only have %llu "
5781                           "for bytenr %llu", refs_to_drop, refs, bytenr);
5782                 ret = -EINVAL;
5783                 btrfs_abort_transaction(trans, extent_root, ret);
5784                 goto out;
5785         }
5786         refs -= refs_to_drop;
5787
5788         if (refs > 0) {
5789                 if (extent_op)
5790                         __run_delayed_extent_op(extent_op, leaf, ei);
5791                 /*
5792                  * In the case of inline back ref, reference count will
5793                  * be updated by remove_extent_backref
5794                  */
5795                 if (iref) {
5796                         BUG_ON(!found_extent);
5797                 } else {
5798                         btrfs_set_extent_refs(leaf, ei, refs);
5799                         btrfs_mark_buffer_dirty(leaf);
5800                 }
5801                 if (found_extent) {
5802                         ret = remove_extent_backref(trans, extent_root, path,
5803                                                     iref, refs_to_drop,
5804                                                     is_data);
5805                         if (ret) {
5806                                 btrfs_abort_transaction(trans, extent_root, ret);
5807                                 goto out;
5808                         }
5809                 }
5810                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
5811                                  root_objectid);
5812         } else {
5813                 if (found_extent) {
5814                         BUG_ON(is_data && refs_to_drop !=
5815                                extent_data_ref_count(root, path, iref));
5816                         if (iref) {
5817                                 BUG_ON(path->slots[0] != extent_slot);
5818                         } else {
5819                                 BUG_ON(path->slots[0] != extent_slot + 1);
5820                                 path->slots[0] = extent_slot;
5821                                 num_to_del = 2;
5822                         }
5823                 }
5824
5825                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5826                                       num_to_del);
5827                 if (ret) {
5828                         btrfs_abort_transaction(trans, extent_root, ret);
5829                         goto out;
5830                 }
5831                 btrfs_release_path(path);
5832
5833                 if (is_data) {
5834                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5835                         if (ret) {
5836                                 btrfs_abort_transaction(trans, extent_root, ret);
5837                                 goto out;
5838                         }
5839                 }
5840
5841                 ret = update_block_group(root, bytenr, num_bytes, 0);
5842                 if (ret) {
5843                         btrfs_abort_transaction(trans, extent_root, ret);
5844                         goto out;
5845                 }
5846         }
5847 out:
5848         btrfs_free_path(path);
5849         return ret;
5850 }
5851
5852 /*
5853  * when we free a block, it is possible (and likely) that we free the last
5854  * delayed ref for that extent as well.  This searches the delayed ref tree for
5855  * a given extent, and if there are no other delayed refs to be processed, it
5856  * removes the ref head from the tree.
5857  */
5858 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5859                                       struct btrfs_root *root, u64 bytenr)
5860 {
5861         struct btrfs_delayed_ref_head *head;
5862         struct btrfs_delayed_ref_root *delayed_refs;
5863         struct btrfs_delayed_ref_node *ref;
5864         struct rb_node *node;
5865         int ret = 0;
5866
5867         delayed_refs = &trans->transaction->delayed_refs;
5868         spin_lock(&delayed_refs->lock);
5869         head = btrfs_find_delayed_ref_head(trans, bytenr);
5870         if (!head)
5871                 goto out;
5872
5873         node = rb_prev(&head->node.rb_node);
5874         if (!node)
5875                 goto out;
5876
5877         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5878
5879         /* there are still entries for this ref, we can't drop it */
5880         if (ref->bytenr == bytenr)
5881                 goto out;
5882
5883         if (head->extent_op) {
5884                 if (!head->must_insert_reserved)
5885                         goto out;
5886                 btrfs_free_delayed_extent_op(head->extent_op);
5887                 head->extent_op = NULL;
5888         }
5889
5890         /*
5891          * waiting for the lock here would deadlock.  If someone else has it
5892          * locked, they are already in the process of dropping it anyway
5893          */
5894         if (!mutex_trylock(&head->mutex))
5895                 goto out;
5896
5897         /*
5898          * at this point we have a head with no other entries.  Go
5899          * ahead and process it.
5900          */
5901         head->node.in_tree = 0;
5902         rb_erase(&head->node.rb_node, &delayed_refs->root);
5903
5904         delayed_refs->num_entries--;
5905
5906         /*
5907          * we don't take a ref on the node because we're removing it from the
5908          * tree, so we just steal the ref the tree was holding.
5909          */
5910         delayed_refs->num_heads--;
5911         if (list_empty(&head->cluster))
5912                 delayed_refs->num_heads_ready--;
5913
5914         list_del_init(&head->cluster);
5915         spin_unlock(&delayed_refs->lock);
5916
5917         BUG_ON(head->extent_op);
5918         if (head->must_insert_reserved)
5919                 ret = 1;
5920
5921         mutex_unlock(&head->mutex);
5922         btrfs_put_delayed_ref(&head->node);
5923         return ret;
5924 out:
5925         spin_unlock(&delayed_refs->lock);
5926         return 0;
5927 }
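/*
 * Note: a return of 1 tells btrfs_free_tree_block() that the pending
 * allocation and free cancelled out and the extent record was never
 * inserted (must_insert_reserved), so the block may go straight back to
 * the free space cache instead of being pinned.
 */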
5928
5929 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5930                            struct btrfs_root *root,
5931                            struct extent_buffer *buf,
5932                            u64 parent, int last_ref)
5933 {
5934         struct btrfs_block_group_cache *cache = NULL;
5935         int pin = 1;
5936         int ret;
5937
5938         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5939                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5940                                         buf->start, buf->len,
5941                                         parent, root->root_key.objectid,
5942                                         btrfs_header_level(buf),
5943                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
5944                 BUG_ON(ret); /* -ENOMEM */
5945         }
5946
5947         if (!last_ref)
5948                 return;
5949
5950         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5951
5952         if (btrfs_header_generation(buf) == trans->transid) {
5953                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5954                         ret = check_ref_cleanup(trans, root, buf->start);
5955                         if (!ret)
5956                                 goto out;
5957                 }
5958
5959                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5960                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5961                         goto out;
5962                 }
5963
5964                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5965
5966                 btrfs_add_free_space(cache, buf->start, buf->len);
5967                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5968                 pin = 0;
5969         }
5970 out:
5971         if (pin)
5972                 add_pinned_bytes(root->fs_info, buf->len,
5973                                  btrfs_header_level(buf),
5974                                  root->root_key.objectid);
5975
5976         /*
5977          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5978          * anymore.
5979          */
5980         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5981         btrfs_put_block_group(cache);
5982 }
5983
5984 /* Can return -ENOMEM */
5985 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5986                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5987                       u64 owner, u64 offset, int for_cow)
5988 {
5989         int ret;
5990         struct btrfs_fs_info *fs_info = root->fs_info;
5991
5992         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
5993
5994         /*
5995          * tree log blocks never actually go into the extent allocation
5996          * tree, just update pinning info and exit early.
5997          */
5998         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5999                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6000                 /* unlocks the pinned mutex */
6001                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6002                 ret = 0;
6003         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6004                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6005                                         num_bytes,
6006                                         parent, root_objectid, (int)owner,
6007                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6008         } else {
6009                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6010                                                 num_bytes,
6011                                                 parent, root_objectid, owner,
6012                                                 offset, BTRFS_DROP_DELAYED_REF,
6013                                                 NULL, for_cow);
6014         }
6015         return ret;
6016 }
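/*
 * Example: dropping one reference to a file's data extent, roughly (the
 * offset arguments here are illustrative):
 *
 *	ret = btrfs_free_extent(trans, root, disk_bytenr, disk_num_bytes,
 *				0, root->root_key.objectid,
 *				btrfs_ino(inode), file_offset, 0);
 *
 * Nothing is removed from the extent tree at this point; a delayed ref
 * is queued and the real work happens when the delayed refs are run,
 * which is why only -ENOMEM is possible here.
 */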
6017
6018 static u64 stripe_align(struct btrfs_root *root,
6019                         struct btrfs_block_group_cache *cache,
6020                         u64 val, u64 num_bytes)
6021 {
6022         u64 ret = ALIGN(val, root->stripesize);
6023         return ret;
6024 }
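/*
 * Note: only the start offset is aligned (rounded up to the stripe
 * size); the cache and num_bytes arguments are currently unused here.
 */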
6025
6026 /*
6027  * when we wait for progress in the block group caching, it's because
6028  * our allocation attempt failed at least once.  So, we must sleep
6029  * and let some progress happen before we try again.
6030  *
6031  * This function will sleep at least once waiting for new free space to
6032  * show up, and then it will check the block group free space numbers
6033  * for our min num_bytes.  Another option is to have it go ahead
6034  * and look in the rbtree for a free extent of a given size, but this
6035  * is a good start.
6036  *
6037  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6038  * any of the information in this block group.
6039  */
6040 static noinline void
6041 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6042                                 u64 num_bytes)
6043 {
6044         struct btrfs_caching_control *caching_ctl;
6045
6046         caching_ctl = get_caching_control(cache);
6047         if (!caching_ctl)
6048                 return;
6049
6050         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6051                    (cache->free_space_ctl->free_space >= num_bytes));
6052
6053         put_caching_control(caching_ctl);
6054 }
6055
6056 static noinline int
6057 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6058 {
6059         struct btrfs_caching_control *caching_ctl;
6060         int ret = 0;
6061
6062         caching_ctl = get_caching_control(cache);
6063         if (!caching_ctl)
6064                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6065
6066         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6067         if (cache->cached == BTRFS_CACHE_ERROR)
6068                 ret = -EIO;
6069         put_caching_control(caching_ctl);
6070         return ret;
6071 }
6072
6073 int __get_raid_index(u64 flags)
6074 {
6075         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6076                 return BTRFS_RAID_RAID10;
6077         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6078                 return BTRFS_RAID_RAID1;
6079         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6080                 return BTRFS_RAID_DUP;
6081         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6082                 return BTRFS_RAID_RAID0;
6083         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6084                 return BTRFS_RAID_RAID5;
6085         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6086                 return BTRFS_RAID_RAID6;
6087
6088         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6089 }
6090
6091 static int get_block_group_index(struct btrfs_block_group_cache *cache)
6092 {
6093         return __get_raid_index(cache->flags);
6094 }
6095
6096 enum btrfs_loop_type {
6097         LOOP_CACHING_NOWAIT = 0,
6098         LOOP_CACHING_WAIT = 1,
6099         LOOP_ALLOC_CHUNK = 2,
6100         LOOP_NO_EMPTY_SIZE = 3,
6101 };
6102
6103 /*
6104  * walks the btree of allocated extents and finds a hole of a given size.
6105  * The key ins is changed to record the hole:
6106  * ins->objectid == start position
6107  * ins->flags = BTRFS_EXTENT_ITEM_KEY
6108  * ins->offset == the size of the hole.
6109  * Any available blocks before search_start are skipped.
6110  *
6111  * If there is no suitable free space, we will record the max size of the
6112  * free space extents we saw in ins->offset before returning -ENOSPC.
6113  */
6114 static noinline int find_free_extent(struct btrfs_root *orig_root,
6115                                      u64 num_bytes, u64 empty_size,
6116                                      u64 hint_byte, struct btrfs_key *ins,
6117                                      u64 flags)
6118 {
6119         int ret = 0;
6120         struct btrfs_root *root = orig_root->fs_info->extent_root;
6121         struct btrfs_free_cluster *last_ptr = NULL;
6122         struct btrfs_block_group_cache *block_group = NULL;
6123         struct btrfs_block_group_cache *used_block_group;
6124         u64 search_start = 0;
6125         u64 max_extent_size = 0;
6126         int empty_cluster = 2 * 1024 * 1024;
6127         struct btrfs_space_info *space_info;
6128         int loop = 0;
6129         int index = __get_raid_index(flags);
6130         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6131                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6132         bool found_uncached_bg = false;
6133         bool failed_cluster_refill = false;
6134         bool failed_alloc = false;
6135         bool use_cluster = true;
6136         bool have_caching_bg = false;
6137
6138         WARN_ON(num_bytes < root->sectorsize);
6139         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6140         ins->objectid = 0;
6141         ins->offset = 0;
6142
6143         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6144
6145         space_info = __find_space_info(root->fs_info, flags);
6146         if (!space_info) {
6147                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6148                 return -ENOSPC;
6149         }
6150
6151         /*
6152          * If the space info is for both data and metadata it means we have a
6153          * small filesystem and we can't use the clustering stuff.
6154          */
6155         if (btrfs_mixed_space_info(space_info))
6156                 use_cluster = false;
6157
6158         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6159                 last_ptr = &root->fs_info->meta_alloc_cluster;
6160                 if (!btrfs_test_opt(root, SSD))
6161                         empty_cluster = 64 * 1024;
6162         }
6163
6164         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6165             btrfs_test_opt(root, SSD)) {
6166                 last_ptr = &root->fs_info->data_alloc_cluster;
6167         }
6168
6169         if (last_ptr) {
6170                 spin_lock(&last_ptr->lock);
6171                 if (last_ptr->block_group)
6172                         hint_byte = last_ptr->window_start;
6173                 spin_unlock(&last_ptr->lock);
6174         }
6175
6176         search_start = max(search_start, first_logical_byte(root, 0));
6177         search_start = max(search_start, hint_byte);
6178
6179         if (!last_ptr)
6180                 empty_cluster = 0;
6181
6182         if (search_start == hint_byte) {
6183                 block_group = btrfs_lookup_block_group(root->fs_info,
6184                                                        search_start);
6185                 used_block_group = block_group;
6186                 /*
6187                  * we don't want to use the block group if it doesn't match our
6188                  * allocation bits, or if it's not cached.
6189                  *
6190                  * However if we are re-searching with an ideal block group
6191                  * picked out then we don't care that the block group is cached.
6192                  */
6193                 if (block_group && block_group_bits(block_group, flags) &&
6194                     block_group->cached != BTRFS_CACHE_NO) {
6195                         down_read(&space_info->groups_sem);
6196                         if (list_empty(&block_group->list) ||
6197                             block_group->ro) {
6198                                 /*
6199                                  * someone is removing this block group,
6200                                  * we can't jump into the have_block_group
6201                                  * target because our list pointers are not
6202                                  * valid
6203                                  */
6204                                 btrfs_put_block_group(block_group);
6205                                 up_read(&space_info->groups_sem);
6206                         } else {
6207                                 index = get_block_group_index(block_group);
6208                                 goto have_block_group;
6209                         }
6210                 } else if (block_group) {
6211                         btrfs_put_block_group(block_group);
6212                 }
6213         }
6214 search:
6215         have_caching_bg = false;
6216         down_read(&space_info->groups_sem);
6217         list_for_each_entry(block_group, &space_info->block_groups[index],
6218                             list) {
6219                 u64 offset;
6220                 int cached;
6221
6222                 used_block_group = block_group;
6223                 btrfs_get_block_group(block_group);
6224                 search_start = block_group->key.objectid;
6225
6226                 /*
6227                  * this can happen if we end up cycling through all the
6228                  * raid types, but we want to make sure we only allocate
6229                  * for the proper type.
6230                  */
6231                 if (!block_group_bits(block_group, flags)) {
6232                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6233                                     BTRFS_BLOCK_GROUP_RAID1 |
6234                                     BTRFS_BLOCK_GROUP_RAID5 |
6235                                     BTRFS_BLOCK_GROUP_RAID6 |
6236                                     BTRFS_BLOCK_GROUP_RAID10;
6237
6238                         /*
6239                          * if they asked for extra copies and this block group
6240                          * doesn't provide them, bail.  This does allow us to
6241                          * fill raid0 from raid1.
6242                          */
6243                         if ((flags & extra) && !(block_group->flags & extra))
6244                                 goto loop;
6245                 }
6246
6247 have_block_group:
6248                 cached = block_group_cache_done(block_group);
6249                 if (unlikely(!cached)) {
6250                         found_uncached_bg = true;
6251                         ret = cache_block_group(block_group, 0);
6252                         BUG_ON(ret < 0);
6253                         ret = 0;
6254                 }
6255
6256                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6257                         goto loop;
6258                 if (unlikely(block_group->ro))
6259                         goto loop;
6260
6261                 /*
6262                  * Ok we want to try to use the cluster allocator, so
6263                  * let's look there
6264                  */
6265                 if (last_ptr) {
6266                         unsigned long aligned_cluster;
6267                         /*
6268                          * the refill lock keeps out other
6269                          * people trying to start a new cluster
6270                          */
6271                         spin_lock(&last_ptr->refill_lock);
6272                         used_block_group = last_ptr->block_group;
6273                         if (used_block_group != block_group &&
6274                             (!used_block_group ||
6275                              used_block_group->ro ||
6276                              !block_group_bits(used_block_group, flags))) {
6277                                 used_block_group = block_group;
6278                                 goto refill_cluster;
6279                         }
6280
6281                         if (used_block_group != block_group)
6282                                 btrfs_get_block_group(used_block_group);
6283
6284                         offset = btrfs_alloc_from_cluster(used_block_group,
6285                                                 last_ptr,
6286                                                 num_bytes,
6287                                                 used_block_group->key.objectid,
6288                                                 &max_extent_size);
6289                         if (offset) {
6290                                 /* we have a block, we're done */
6291                                 spin_unlock(&last_ptr->refill_lock);
6292                                 trace_btrfs_reserve_extent_cluster(root,
6293                                         block_group, search_start, num_bytes);
6294                                 goto checks;
6295                         }
6296
6297                         WARN_ON(last_ptr->block_group != used_block_group);
6298                         if (used_block_group != block_group) {
6299                                 btrfs_put_block_group(used_block_group);
6300                                 used_block_group = block_group;
6301                         }
6302 refill_cluster:
6303                         BUG_ON(used_block_group != block_group);
6304                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
6305                          * set up a new cluster, so let's just skip it
6306                          * and let the allocator find whatever block
6307                          * it can find.  If we reach this point, we
6308                          * will have tried the cluster allocator
6309                          * plenty of times and not have found
6310                          * anything, so we are likely way too
6311                          * fragmented for the clustering stuff to find
6312                          * anything.
6313                          *
6314                          * However, if the cluster is taken from the
6315                          * current block group, release the cluster
6316                          * first, so that we stand a better chance of
6317                          * succeeding in the unclustered
6318                          * allocation.  */
6319                         if (loop >= LOOP_NO_EMPTY_SIZE &&
6320                             last_ptr->block_group != block_group) {
6321                                 spin_unlock(&last_ptr->refill_lock);
6322                                 goto unclustered_alloc;
6323                         }
6324
6325                         /*
6326                          * this cluster didn't work out, free it and
6327                          * start over
6328                          */
6329                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6330
6331                         if (loop >= LOOP_NO_EMPTY_SIZE) {
6332                                 spin_unlock(&last_ptr->refill_lock);
6333                                 goto unclustered_alloc;
6334                         }
6335
6336                         aligned_cluster = max_t(unsigned long,
6337                                                 empty_cluster + empty_size,
6338                                               block_group->full_stripe_len);
6339
6340                         /* allocate a cluster in this block group */
6341                         ret = btrfs_find_space_cluster(root, block_group,
6342                                                        last_ptr, search_start,
6343                                                        num_bytes,
6344                                                        aligned_cluster);
6345                         if (ret == 0) {
6346                                 /*
6347                                  * now pull our allocation out of this
6348                                  * cluster
6349                                  */
6350                                 offset = btrfs_alloc_from_cluster(block_group,
6351                                                         last_ptr,
6352                                                         num_bytes,
6353                                                         search_start,
6354                                                         &max_extent_size);
6355                                 if (offset) {
6356                                         /* we found one, proceed */
6357                                         spin_unlock(&last_ptr->refill_lock);
6358                                         trace_btrfs_reserve_extent_cluster(root,
6359                                                 block_group, search_start,
6360                                                 num_bytes);
6361                                         goto checks;
6362                                 }
6363                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
6364                                    && !failed_cluster_refill) {
6365                                 spin_unlock(&last_ptr->refill_lock);
6366
6367                                 failed_cluster_refill = true;
6368                                 wait_block_group_cache_progress(block_group,
6369                                        num_bytes + empty_cluster + empty_size);
6370                                 goto have_block_group;
6371                         }
6372
6373                         /*
6374                          * at this point we either didn't find a cluster
6375                          * or we weren't able to allocate a block from our
6376                          * cluster.  Free the cluster we've been trying
6377                          * to use, and go to the next block group
6378                          */
6379                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
6380                         spin_unlock(&last_ptr->refill_lock);
6381                         goto loop;
6382                 }
6383
6384 unclustered_alloc:
6385                 spin_lock(&block_group->free_space_ctl->tree_lock);
6386                 if (cached &&
6387                     block_group->free_space_ctl->free_space <
6388                     num_bytes + empty_cluster + empty_size) {
6389                         if (block_group->free_space_ctl->free_space >
6390                             max_extent_size)
6391                                 max_extent_size =
6392                                         block_group->free_space_ctl->free_space;
6393                         spin_unlock(&block_group->free_space_ctl->tree_lock);
6394                         goto loop;
6395                 }
6396                 spin_unlock(&block_group->free_space_ctl->tree_lock);
6397
6398                 offset = btrfs_find_space_for_alloc(block_group, search_start,
6399                                                     num_bytes, empty_size,
6400                                                     &max_extent_size);
6401                 /*
6402                  * If we didn't find a chunk, and we haven't failed on this
6403                  * block group before, and this block group is in the middle of
6404                  * caching and we are ok with waiting, then go ahead and wait
6405                  * for progress to be made, and set failed_alloc to true.
6406                  *
6407                  * If failed_alloc is true then we've already waited on this
6408                  * block group once and should move on to the next block group.
6409                  */
6410                 if (!offset && !failed_alloc && !cached &&
6411                     loop > LOOP_CACHING_NOWAIT) {
6412                         wait_block_group_cache_progress(block_group,
6413                                                 num_bytes + empty_size);
6414                         failed_alloc = true;
6415                         goto have_block_group;
6416                 } else if (!offset) {
6417                         if (!cached)
6418                                 have_caching_bg = true;
6419                         goto loop;
6420                 }
6421 checks:
6422                 search_start = stripe_align(root, used_block_group,
6423                                             offset, num_bytes);
6424
6425                 /* move on to the next group */
6426                 if (search_start + num_bytes >
6427                     used_block_group->key.objectid + used_block_group->key.offset) {
6428                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6429                         goto loop;
6430                 }
6431
6432                 if (offset < search_start)
6433                         btrfs_add_free_space(used_block_group, offset,
6434                                              search_start - offset);
6435                 BUG_ON(offset > search_start);
6436
6437                 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6438                                                   alloc_type);
6439                 if (ret == -EAGAIN) {
6440                         btrfs_add_free_space(used_block_group, offset, num_bytes);
6441                         goto loop;
6442                 }
6443
6444                 /* we are all good, let's return */
6445                 ins->objectid = search_start;
6446                 ins->offset = num_bytes;
6447
6448                 trace_btrfs_reserve_extent(orig_root, block_group,
6449                                            search_start, num_bytes);
6450                 if (used_block_group != block_group)
6451                         btrfs_put_block_group(used_block_group);
6452                 btrfs_put_block_group(block_group);
6453                 break;
6454 loop:
6455                 failed_cluster_refill = false;
6456                 failed_alloc = false;
6457                 BUG_ON(index != get_block_group_index(block_group));
6458                 if (used_block_group != block_group)
6459                         btrfs_put_block_group(used_block_group);
6460                 btrfs_put_block_group(block_group);
6461         }
6462         up_read(&space_info->groups_sem);
6463
6464         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
6465                 goto search;
6466
6467         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
6468                 goto search;
6469
6470         /*
6471          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
6472          *                      caching kthreads as we move along
6473          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
6474          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
6475          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
6476          *                      again
6477          */
6478         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6479                 index = 0;
6480                 loop++;
6481                 if (loop == LOOP_ALLOC_CHUNK) {
6482                         struct btrfs_trans_handle *trans;
6483
6484                         trans = btrfs_join_transaction(root);
6485                         if (IS_ERR(trans)) {
6486                                 ret = PTR_ERR(trans);
6487                                 goto out;
6488                         }
6489
6490                         ret = do_chunk_alloc(trans, root, flags,
6491                                              CHUNK_ALLOC_FORCE);
6492                         /*
6493                          * Do not bail out on ENOSPC since we can
6494                          * still retry, e.g. with LOOP_NO_EMPTY_SIZE.
6495                          */
6496                         if (ret < 0 && ret != -ENOSPC)
6497                                 btrfs_abort_transaction(trans,
6498                                                         root, ret);
6499                         else
6500                                 ret = 0;
6501                         btrfs_end_transaction(trans, root);
6502                         if (ret)
6503                                 goto out;
6504                 }
6505
6506                 if (loop == LOOP_NO_EMPTY_SIZE) {
6507                         empty_size = 0;
6508                         empty_cluster = 0;
6509                 }
6510
6511                 goto search;
6512         } else if (!ins->objectid) {
6513                 ret = -ENOSPC;
6514         } else {
6515                 ret = 0;
6516         }
6517 out:
6518         if (ret == -ENOSPC)
6519                 ins->offset = max_extent_size;
6520         return ret;
6521 }
6522
6523 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
6524                             int dump_block_groups)
6525 {
6526         struct btrfs_block_group_cache *cache;
6527         int index = 0;
6528
6529         spin_lock(&info->lock);
6530         printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6531                info->flags,
6532                info->total_bytes - info->bytes_used - info->bytes_pinned -
6533                info->bytes_reserved - info->bytes_readonly,
6534                (info->full) ? "" : "not ");
6535         printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
6536                "reserved=%llu, may_use=%llu, readonly=%llu\n",
6537                info->total_bytes, info->bytes_used, info->bytes_pinned,
6538                info->bytes_reserved, info->bytes_may_use,
6539                info->bytes_readonly);
6540         spin_unlock(&info->lock);
6541
6542         if (!dump_block_groups)
6543                 return;
6544
6545         down_read(&info->groups_sem);
6546 again:
6547         list_for_each_entry(cache, &info->block_groups[index], list) {
6548                 spin_lock(&cache->lock);
6549                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6550                        cache->key.objectid, cache->key.offset,
6551                        btrfs_block_group_used(&cache->item), cache->pinned,
6552                        cache->reserved, cache->ro ? "[readonly]" : "");
6553                 btrfs_dump_free_space(cache, bytes);
6554                 spin_unlock(&cache->lock);
6555         }
6556         if (++index < BTRFS_NR_RAID_TYPES)
6557                 goto again;
6558         up_read(&info->groups_sem);
6559 }
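/*
 * Example output (illustrative values only):
 *
 *	space_info 4 has 1073741824 free, is not full
 *	space_info total=2147483648, used=1073741824, pinned=0,
 *	reserved=0, may_use=0, readonly=0
 *
 * followed, when dump_block_groups is set, by one summary line plus a
 * free space dump per block group.
 */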
6560
6561 int btrfs_reserve_extent(struct btrfs_root *root,
6562                          u64 num_bytes, u64 min_alloc_size,
6563                          u64 empty_size, u64 hint_byte,
6564                          struct btrfs_key *ins, int is_data)
6565 {
6566         bool final_tried = false;
6567         u64 flags;
6568         int ret;
6569
6570         flags = btrfs_get_alloc_profile(root, is_data);
6571 again:
6572         WARN_ON(num_bytes < root->sectorsize);
6573         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
6574                                flags);
6575
6576         if (ret == -ENOSPC) {
6577                 if (!final_tried && ins->offset) {
6578                         num_bytes = min(num_bytes >> 1, ins->offset);
6579                         num_bytes = round_down(num_bytes, root->sectorsize);
6580                         num_bytes = max(num_bytes, min_alloc_size);
6581                         if (num_bytes == min_alloc_size)
6582                                 final_tried = true;
6583                         goto again;
6584                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6585                         struct btrfs_space_info *sinfo;
6586
6587                         sinfo = __find_space_info(root->fs_info, flags);
6588                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6589                                 flags, num_bytes);
6590                         if (sinfo)
6591                                 dump_space_info(sinfo, num_bytes, 1);
6592                 }
6593         }
6594
6595         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6596
6597         return ret;
6598 }
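/*
 * Example of the back-off above: asked for 8MiB of data with a
 * min_alloc_size of 64KiB, on a filesystem whose largest free extent is
 * 1MiB, the retry uses min(8MiB >> 1, 1MiB) = 1MiB (rounded down to a
 * sector boundary), then keeps halving toward min_alloc_size before
 * finally giving up with -ENOSPC.
 */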
6599
6600 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6601                                         u64 start, u64 len, int pin)
6602 {
6603         struct btrfs_block_group_cache *cache;
6604         int ret = 0;
6605
6606         cache = btrfs_lookup_block_group(root->fs_info, start);
6607         if (!cache) {
6608                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
6609                         start);
6610                 return -ENOSPC;
6611         }
6612
6613         if (btrfs_test_opt(root, DISCARD))
6614                 ret = btrfs_discard_extent(root, start, len, NULL);
6615
6616         if (pin)
6617                 pin_down_extent(root, cache, start, len, 1);
6618         else {
6619                 btrfs_add_free_space(cache, start, len);
6620                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
6621         }
6622         btrfs_put_block_group(cache);
6623
6624         trace_btrfs_reserved_extent_free(root, start, len);
6625
6626         return ret;
6627 }
6628
6629 int btrfs_free_reserved_extent(struct btrfs_root *root,
6630                                         u64 start, u64 len)
6631 {
6632         return __btrfs_free_reserved_extent(root, start, len, 0);
6633 }
6634
6635 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
6636                                        u64 start, u64 len)
6637 {
6638         return __btrfs_free_reserved_extent(root, start, len, 1);
6639 }
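/*
 * The pinned variant is meant for extents that may still be referenced
 * until the transaction commits (e.g. blocks freed while a tree log is
 * torn down); the plain variant returns the space to the free space
 * cache immediately.
 */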
6640
6641 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6642                                       struct btrfs_root *root,
6643                                       u64 parent, u64 root_objectid,
6644                                       u64 flags, u64 owner, u64 offset,
6645                                       struct btrfs_key *ins, int ref_mod)
6646 {
6647         int ret;
6648         struct btrfs_fs_info *fs_info = root->fs_info;
6649         struct btrfs_extent_item *extent_item;
6650         struct btrfs_extent_inline_ref *iref;
6651         struct btrfs_path *path;
6652         struct extent_buffer *leaf;
6653         int type;
6654         u32 size;
6655
6656         if (parent > 0)
6657                 type = BTRFS_SHARED_DATA_REF_KEY;
6658         else
6659                 type = BTRFS_EXTENT_DATA_REF_KEY;
6660
6661         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6662
6663         path = btrfs_alloc_path();
6664         if (!path)
6665                 return -ENOMEM;
6666
6667         path->leave_spinning = 1;
6668         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6669                                       ins, size);
6670         if (ret) {
6671                 btrfs_free_path(path);
6672                 return ret;
6673         }
6674
6675         leaf = path->nodes[0];
6676         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6677                                      struct btrfs_extent_item);
6678         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
6679         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6680         btrfs_set_extent_flags(leaf, extent_item,
6681                                flags | BTRFS_EXTENT_FLAG_DATA);
6682
6683         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6684         btrfs_set_extent_inline_ref_type(leaf, iref, type);
6685         if (parent > 0) {
6686                 struct btrfs_shared_data_ref *ref;
6687                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
6688                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6689                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
6690         } else {
6691                 struct btrfs_extent_data_ref *ref;
6692                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
6693                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
6694                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
6695                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
6696                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
6697         }
6698
6699         btrfs_mark_buffer_dirty(path->nodes[0]);
6700         btrfs_free_path(path);
6701
6702         ret = update_block_group(root, ins->objectid, ins->offset, 1);
6703         if (ret) { /* -ENOENT, logic error */
6704                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6705                         ins->objectid, ins->offset);
6706                 BUG();
6707         }
6708         return ret;
6709 }
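/*
 * On-disk layout produced above, for reference.  In the non-shared
 * case the btrfs_extent_data_ref is written over the inline ref's
 * 64-bit offset field (note the (&iref->offset) cast):
 *
 *   [ btrfs_extent_item: refs | generation | flags = DATA ]
 *   [ inline ref, type = EXTENT_DATA_REF_KEY ]
 *   [ btrfs_extent_data_ref: root | objectid | offset | count ]
 *
 * In the shared case the inline ref's offset holds the parent bytenr
 * and a btrfs_shared_data_ref { count } follows it.
 */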
6710
6711 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6712                                      struct btrfs_root *root,
6713                                      u64 parent, u64 root_objectid,
6714                                      u64 flags, struct btrfs_disk_key *key,
6715                                      int level, struct btrfs_key *ins)
6716 {
6717         int ret;
6718         struct btrfs_fs_info *fs_info = root->fs_info;
6719         struct btrfs_extent_item *extent_item;
6720         struct btrfs_tree_block_info *block_info;
6721         struct btrfs_extent_inline_ref *iref;
6722         struct btrfs_path *path;
6723         struct extent_buffer *leaf;
6724         u32 size = sizeof(*extent_item) + sizeof(*iref);
6725         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6726                                                  SKINNY_METADATA);
6727
6728         if (!skinny_metadata)
6729                 size += sizeof(*block_info);
6730
6731         path = btrfs_alloc_path();
6732         if (!path)
6733                 return -ENOMEM;
6734
6735         path->leave_spinning = 1;
6736         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
6737                                       ins, size);
6738         if (ret) {
6739                 btrfs_free_path(path);
6740                 return ret;
6741         }
6742
6743         leaf = path->nodes[0];
6744         extent_item = btrfs_item_ptr(leaf, path->slots[0],
6745                                      struct btrfs_extent_item);
6746         btrfs_set_extent_refs(leaf, extent_item, 1);
6747         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
6748         btrfs_set_extent_flags(leaf, extent_item,
6749                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
6750
6751         if (skinny_metadata) {
6752                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
6753         } else {
6754                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
6755                 btrfs_set_tree_block_key(leaf, block_info, key);
6756                 btrfs_set_tree_block_level(leaf, block_info, level);
6757                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
6758         }
6759
6760         if (parent > 0) {
6761                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
6762                 btrfs_set_extent_inline_ref_type(leaf, iref,
6763                                                  BTRFS_SHARED_BLOCK_REF_KEY);
6764                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
6765         } else {
6766                 btrfs_set_extent_inline_ref_type(leaf, iref,
6767                                                  BTRFS_TREE_BLOCK_REF_KEY);
6768                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
6769         }
6770
6771         btrfs_mark_buffer_dirty(leaf);
6772         btrfs_free_path(path);
6773
6774         ret = update_block_group(root, ins->objectid, root->leafsize, 1);
6775         if (ret) { /* -ENOENT, logic error */
6776                 btrfs_err(fs_info, "update block group failed for %llu %llu",
6777                         ins->objectid, (u64)root->leafsize);
6778                 BUG();
6779         }
6780         return ret;
6781 }
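/*
 * Item size note for the insert above: with SKINNY_METADATA the key
 * already encodes bytenr and level, so the item is just the extent
 * item plus one inline ref; without it, a btrfs_tree_block_info
 * (holding the first key and the level) sits between the two.
 */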
6782
6783 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
6784                                      struct btrfs_root *root,
6785                                      u64 root_objectid, u64 owner,
6786                                      u64 offset, struct btrfs_key *ins)
6787 {
6788         int ret;
6789
6790         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
6791
6792         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
6793                                          ins->offset, 0,
6794                                          root_objectid, owner, offset,
6795                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
6796         return ret;
6797 }
6798
6799 /*
6800  * this is used by the tree logging recovery code.  It records that
6801  * an extent has been allocated and makes sure to clear the free
6802  * space cache bits as well
6803  */
6804 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6805                                    struct btrfs_root *root,
6806                                    u64 root_objectid, u64 owner, u64 offset,
6807                                    struct btrfs_key *ins)
6808 {
6809         int ret;
6810         struct btrfs_block_group_cache *block_group;
6811
6812         /*
6813          * Mixed block groups will be excluded before processing the log, so we
6814          * only need to do the exclude dance if this fs isn't mixed.
6815          */
6816         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
6817                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
6818                 if (ret)
6819                         return ret;
6820         }
6821
6822         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
6823         if (!block_group)
6824                 return -EINVAL;
6825
6826         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6827                                           RESERVE_ALLOC_NO_ACCOUNT);
6828         BUG_ON(ret); /* logic error */
6829         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6830                                          0, owner, offset, ins, 1);
6831         btrfs_put_block_group(block_group);
6832         return ret;
6833 }
6834
6835 static struct extent_buffer *
6836 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6837                       u64 bytenr, u32 blocksize, int level)
6838 {
6839         struct extent_buffer *buf;
6840
6841         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6842         if (!buf)
6843                 return ERR_PTR(-ENOMEM);
6844         btrfs_set_header_generation(buf, trans->transid);
6845         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6846         btrfs_tree_lock(buf);
6847         clean_tree_block(trans, root, buf);
6848         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6849
6850         btrfs_set_lock_blocking(buf);
6851         btrfs_set_buffer_uptodate(buf);
6852
6853         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6854                 /*
6855                  * we allow two log transactions at a time, so use a different
6856                  * EXTENT bit to differentiate their dirty pages.
6857                  */
6858                 if (root->log_transid % 2 == 0)
6859                         set_extent_dirty(&root->dirty_log_pages, buf->start,
6860                                         buf->start + buf->len - 1, GFP_NOFS);
6861                 else
6862                         set_extent_new(&root->dirty_log_pages, buf->start,
6863                                         buf->start + buf->len - 1, GFP_NOFS);
6864         } else {
6865                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6866                          buf->start + buf->len - 1, GFP_NOFS);
6867         }
6868         trans->blocks_used++;
6869         /* this returns a buffer locked for blocking */
6870         return buf;
6871 }
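/*
 * Why the parity trick above works: two log transactions may be in
 * flight at once, so blocks belonging to an even log transid are
 * tracked with the dirty bit and those of an odd one with the new
 * bit; a log commit then writes out only the pages tagged for its
 * own transid.
 */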
6872
6873 static struct btrfs_block_rsv *
6874 use_block_rsv(struct btrfs_trans_handle *trans,
6875               struct btrfs_root *root, u32 blocksize)
6876 {
6877         struct btrfs_block_rsv *block_rsv;
6878         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6879         int ret;
6880         bool global_updated = false;
6881
6882         block_rsv = get_block_rsv(trans, root);
6883
6884         if (unlikely(block_rsv->size == 0))
6885                 goto try_reserve;
6886 again:
6887         ret = block_rsv_use_bytes(block_rsv, blocksize);
6888         if (!ret)
6889                 return block_rsv;
6890
6891         if (block_rsv->failfast)
6892                 return ERR_PTR(ret);
6893
6894         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6895                 global_updated = true;
6896                 update_global_block_rsv(root->fs_info);
6897                 goto again;
6898         }
6899
6900         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6901                 static DEFINE_RATELIMIT_STATE(_rs,
6902                                 DEFAULT_RATELIMIT_INTERVAL * 10,
6903                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
6904                 if (__ratelimit(&_rs))
6905                         WARN(1, KERN_DEBUG
6906                                 "btrfs: block rsv returned %d\n", ret);
6907         }
6908 try_reserve:
6909         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6910                                      BTRFS_RESERVE_NO_FLUSH);
6911         if (!ret)
6912                 return block_rsv;
6913         /*
6914          * If we couldn't reserve metadata bytes, try to use some from
6915          * the global reserve, provided this rsv shares the global
6916          * reserve's space_info.
6917          */
6918         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6919             block_rsv->space_info == global_rsv->space_info) {
6920                 ret = block_rsv_use_bytes(global_rsv, blocksize);
6921                 if (!ret)
6922                         return global_rsv;
6923         }
6924         return ERR_PTR(ret);
6925 }
6926
6927 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
6928                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
6929 {
6930         block_rsv_add_bytes(block_rsv, blocksize, 0);
6931         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
6932 }
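/*
 * Reservation fallback order implemented by use_block_rsv() above:
 *
 *   1) take blocksize bytes from the transaction's block rsv;
 *   2) if that rsv is the global one, refresh it once and retry;
 *   3) otherwise reserve fresh metadata bytes (no flushing);
 *   4) as a last resort, steal from the global rsv when it shares
 *      the same space_info.
 *
 * A failfast rsv gives up immediately after step 1.
 */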
6933
6934 /*
6935  * finds a free extent and does all the dirty work required for allocation,
6936  * including (for non-log trees) queueing the delayed ref that records
6937  * the new tree block.
6938  *
6939  * returns a locked tree buffer or an ERR_PTR on failure.
6940  */
6941 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6942                                         struct btrfs_root *root, u32 blocksize,
6943                                         u64 parent, u64 root_objectid,
6944                                         struct btrfs_disk_key *key, int level,
6945                                         u64 hint, u64 empty_size)
6946 {
6947         struct btrfs_key ins;
6948         struct btrfs_block_rsv *block_rsv;
6949         struct extent_buffer *buf;
6950         u64 flags = 0;
6951         int ret;
6952         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6953                                                  SKINNY_METADATA);
6954
6955         block_rsv = use_block_rsv(trans, root, blocksize);
6956         if (IS_ERR(block_rsv))
6957                 return ERR_CAST(block_rsv);
6958
6959         ret = btrfs_reserve_extent(root, blocksize, blocksize,
6960                                    empty_size, hint, &ins, 0);
6961         if (ret) {
6962                 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6963                 return ERR_PTR(ret);
6964         }
6965
6966         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6967                                     blocksize, level);
6968         BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6969
6970         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6971                 if (parent == 0)
6972                         parent = ins.objectid;
6973                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6974         } else
6975                 BUG_ON(parent > 0);
6976
6977         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6978                 struct btrfs_delayed_extent_op *extent_op;
6979                 extent_op = btrfs_alloc_delayed_extent_op();
6980                 BUG_ON(!extent_op); /* -ENOMEM */
6981                 if (key)
6982                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
6983                 else
6984                         memset(&extent_op->key, 0, sizeof(extent_op->key));
6985                 extent_op->flags_to_set = flags;
6986                 if (skinny_metadata)
6987                         extent_op->update_key = 0;
6988                 else
6989                         extent_op->update_key = 1;
6990                 extent_op->update_flags = 1;
6991                 extent_op->is_data = 0;
6992                 extent_op->level = level;
6993
6994                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6995                                         ins.objectid,
6996                                         ins.offset, parent, root_objectid,
6997                                         level, BTRFS_ADD_DELAYED_EXTENT,
6998                                         extent_op, 0);
6999                 BUG_ON(ret); /* -ENOMEM */
7000         }
7001         return buf;
7002 }
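/*
 * Summary of the allocation pipeline above: reserve space from a
 * block rsv, carve an extent out of a block group, initialize the
 * new buffer, and (for everything except the log trees) queue a
 * delayed ref so the extent tree is updated when delayed refs are
 * run.
 */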
7003
7004 struct walk_control {
7005         u64 refs[BTRFS_MAX_LEVEL];
7006         u64 flags[BTRFS_MAX_LEVEL];
7007         struct btrfs_key update_progress;
7008         int stage;
7009         int level;
7010         int shared_level;
7011         int update_ref;
7012         int keep_locks;
7013         int reada_slot;
7014         int reada_count;
7015         int for_reloc;
7016 };
7017
7018 #define DROP_REFERENCE  1
7019 #define UPDATE_BACKREF  2
7020
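/*
 * The tree walk below runs in two stages.  DROP_REFERENCE walks down
 * dropping one reference per block.  When it finds a shared block
 * whose subtree still needs back-ref updates, it switches to
 * UPDATE_BACKREF, converts refs across that subtree, then drops back
 * to DROP_REFERENCE at the shared level and resumes.
 */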
7021 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7022                                      struct btrfs_root *root,
7023                                      struct walk_control *wc,
7024                                      struct btrfs_path *path)
7025 {
7026         u64 bytenr;
7027         u64 generation;
7028         u64 refs;
7029         u64 flags;
7030         u32 nritems;
7031         u32 blocksize;
7032         struct btrfs_key key;
7033         struct extent_buffer *eb;
7034         int ret;
7035         int slot;
7036         int nread = 0;
7037
7038         if (path->slots[wc->level] < wc->reada_slot) {
7039                 wc->reada_count = wc->reada_count * 2 / 3;
7040                 wc->reada_count = max(wc->reada_count, 2);
7041         } else {
7042                 wc->reada_count = wc->reada_count * 3 / 2;
7043                 wc->reada_count = min_t(int, wc->reada_count,
7044                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7045         }
7046
7047         eb = path->nodes[wc->level];
7048         nritems = btrfs_header_nritems(eb);
7049         blocksize = btrfs_level_size(root, wc->level - 1);
7050
7051         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7052                 if (nread >= wc->reada_count)
7053                         break;
7054
7055                 cond_resched();
7056                 bytenr = btrfs_node_blockptr(eb, slot);
7057                 generation = btrfs_node_ptr_generation(eb, slot);
7058
7059                 if (slot == path->slots[wc->level])
7060                         goto reada;
7061
7062                 if (wc->stage == UPDATE_BACKREF &&
7063                     generation <= root->root_key.offset)
7064                         continue;
7065
7066                 /* We don't lock the tree block, it's OK to be racy here */
7067                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7068                                                wc->level - 1, 1, &refs,
7069                                                &flags);
7070                 /* We don't care about errors in readahead. */
7071                 if (ret < 0)
7072                         continue;
7073                 BUG_ON(refs == 0);
7074
7075                 if (wc->stage == DROP_REFERENCE) {
7076                         if (refs == 1)
7077                                 goto reada;
7078
7079                         if (wc->level == 1 &&
7080                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7081                                 continue;
7082                         if (!wc->update_ref ||
7083                             generation <= root->root_key.offset)
7084                                 continue;
7085                         btrfs_node_key_to_cpu(eb, &key, slot);
7086                         ret = btrfs_comp_cpu_keys(&key,
7087                                                   &wc->update_progress);
7088                         if (ret < 0)
7089                                 continue;
7090                 } else {
7091                         if (wc->level == 1 &&
7092                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7093                                 continue;
7094                 }
7095 reada:
7096                 ret = readahead_tree_block(root, bytenr, blocksize,
7097                                            generation);
7098                 if (ret)
7099                         break;
7100                 nread++;
7101         }
7102         wc->reada_slot = slot;
7103 }
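/*
 * The readahead window above adapts to how fast the caller consumes
 * slots: if we are still behind the last readahead position, the
 * window shrinks to 2/3 (never below 2); otherwise it grows by 3/2,
 * capped at one full node's worth of pointers.
 */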
7104
7105 /*
7106  * helper to process tree block while walking down the tree.
7107  *
7108  * when wc->stage == UPDATE_BACKREF, this function updates
7109  * back refs for pointers in the block.
7110  *
7111  * NOTE: return value 1 means we should stop walking down.
7112  */
7113 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
7114                                    struct btrfs_root *root,
7115                                    struct btrfs_path *path,
7116                                    struct walk_control *wc, int lookup_info)
7117 {
7118         int level = wc->level;
7119         struct extent_buffer *eb = path->nodes[level];
7120         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7121         int ret;
7122
7123         if (wc->stage == UPDATE_BACKREF &&
7124             btrfs_header_owner(eb) != root->root_key.objectid)
7125                 return 1;
7126
7127         /*
7128          * when reference count of tree block is 1, it won't increase
7129          * again. once full backref flag is set, we never clear it.
7130          */
7131         if (lookup_info &&
7132             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
7133              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
7134                 BUG_ON(!path->locks[level]);
7135                 ret = btrfs_lookup_extent_info(trans, root,
7136                                                eb->start, level, 1,
7137                                                &wc->refs[level],
7138                                                &wc->flags[level]);
7139                 BUG_ON(ret == -ENOMEM);
7140                 if (ret)
7141                         return ret;
7142                 BUG_ON(wc->refs[level] == 0);
7143         }
7144
7145         if (wc->stage == DROP_REFERENCE) {
7146                 if (wc->refs[level] > 1)
7147                         return 1;
7148
7149                 if (path->locks[level] && !wc->keep_locks) {
7150                         btrfs_tree_unlock_rw(eb, path->locks[level]);
7151                         path->locks[level] = 0;
7152                 }
7153                 return 0;
7154         }
7155
7156         /* wc->stage == UPDATE_BACKREF */
7157         if (!(wc->flags[level] & flag)) {
7158                 BUG_ON(!path->locks[level]);
7159                 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
7160                 BUG_ON(ret); /* -ENOMEM */
7161                 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
7162                 BUG_ON(ret); /* -ENOMEM */
7163                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
7164                                                   eb->len, flag,
7165                                                   btrfs_header_level(eb), 0);
7166                 BUG_ON(ret); /* -ENOMEM */
7167                 wc->flags[level] |= flag;
7168         }
7169
7170         /*
7171          * the block is shared by multiple trees, so it's not good to
7172          * keep the tree lock
7173          */
7174         if (path->locks[level] && level > 0) {
7175                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7176                 path->locks[level] = 0;
7177         }
7178         return 0;
7179 }
7180
7181 /*
7182  * helper to process tree block pointer.
7183  *
7184  * when wc->stage == DROP_REFERENCE, this function checks
7185  * reference count of the block pointed to. if the block
7186  * is shared and we need to update back refs for the subtree
7187  * rooted at the block, this function changes wc->stage to
7188  * UPDATE_BACKREF. if the block is shared and there is no
7189  * need to update back refs, this function drops the reference
7190  * to the block.
7191  *
7192  * NOTE: return value 1 means we should stop walking down.
7193  */
7194 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
7195                                  struct btrfs_root *root,
7196                                  struct btrfs_path *path,
7197                                  struct walk_control *wc, int *lookup_info)
7198 {
7199         u64 bytenr;
7200         u64 generation;
7201         u64 parent;
7202         u32 blocksize;
7203         struct btrfs_key key;
7204         struct extent_buffer *next;
7205         int level = wc->level;
7206         int reada = 0;
7207         int ret = 0;
7208
7209         generation = btrfs_node_ptr_generation(path->nodes[level],
7210                                                path->slots[level]);
7211         /*
7212          * if the lower level block was created before the snapshot
7213          * was created, we know there is no need to update back refs
7214          * for the subtree
7215          */
7216         if (wc->stage == UPDATE_BACKREF &&
7217             generation <= root->root_key.offset) {
7218                 *lookup_info = 1;
7219                 return 1;
7220         }
7221
7222         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
7223         blocksize = btrfs_level_size(root, level - 1);
7224
7225         next = btrfs_find_tree_block(root, bytenr, blocksize);
7226         if (!next) {
7227                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
7228                 if (!next)
7229                         return -ENOMEM;
7230                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
7231                                                level - 1);
7232                 reada = 1;
7233         }
7234         btrfs_tree_lock(next);
7235         btrfs_set_lock_blocking(next);
7236
7237         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
7238                                        &wc->refs[level - 1],
7239                                        &wc->flags[level - 1]);
7240         if (ret < 0) {
7241                 btrfs_tree_unlock(next);
7242                 return ret;
7243         }
7244
7245         if (unlikely(wc->refs[level - 1] == 0)) {
7246                 btrfs_err(root->fs_info, "Missing references.");
7247                 BUG();
7248         }
7249         *lookup_info = 0;
7250
7251         if (wc->stage == DROP_REFERENCE) {
7252                 if (wc->refs[level - 1] > 1) {
7253                         if (level == 1 &&
7254                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7255                                 goto skip;
7256
7257                         if (!wc->update_ref ||
7258                             generation <= root->root_key.offset)
7259                                 goto skip;
7260
7261                         btrfs_node_key_to_cpu(path->nodes[level], &key,
7262                                               path->slots[level]);
7263                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
7264                         if (ret < 0)
7265                                 goto skip;
7266
7267                         wc->stage = UPDATE_BACKREF;
7268                         wc->shared_level = level - 1;
7269                 }
7270         } else {
7271                 if (level == 1 &&
7272                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7273                         goto skip;
7274         }
7275
7276         if (!btrfs_buffer_uptodate(next, generation, 0)) {
7277                 btrfs_tree_unlock(next);
7278                 free_extent_buffer(next);
7279                 next = NULL;
7280                 *lookup_info = 1;
7281         }
7282
7283         if (!next) {
7284                 if (reada && level == 1)
7285                         reada_walk_down(trans, root, wc, path);
7286                 next = read_tree_block(root, bytenr, blocksize, generation);
7287                 if (!next || !extent_buffer_uptodate(next)) {
7288                         free_extent_buffer(next);
7289                         return -EIO;
7290                 }
7291                 btrfs_tree_lock(next);
7292                 btrfs_set_lock_blocking(next);
7293         }
7294
7295         level--;
7296         BUG_ON(level != btrfs_header_level(next));
7297         path->nodes[level] = next;
7298         path->slots[level] = 0;
7299         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7300         wc->level = level;
7301         if (wc->level == 1)
7302                 wc->reada_slot = 0;
7303         return 0;
7304 skip:
7305         wc->refs[level - 1] = 0;
7306         wc->flags[level - 1] = 0;
7307         if (wc->stage == DROP_REFERENCE) {
7308                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
7309                         parent = path->nodes[level]->start;
7310                 } else {
7311                         BUG_ON(root->root_key.objectid !=
7312                                btrfs_header_owner(path->nodes[level]));
7313                         parent = 0;
7314                 }
7315
7316                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
7317                                 root->root_key.objectid, level - 1, 0, 0);
7318                 BUG_ON(ret); /* -ENOMEM */
7319         }
7320         btrfs_tree_unlock(next);
7321         free_extent_buffer(next);
7322         *lookup_info = 1;
7323         return 1;
7324 }
7325
7326 /*
7327  * helper to process tree block while walking up the tree.
7328  *
7329  * when wc->stage == DROP_REFERENCE, this function drops
7330  * reference count on the block.
7331  *
7332  * when wc->stage == UPDATE_BACKREF, this function changes
7333  * wc->stage back to DROP_REFERENCE if we changed wc->stage
7334  * to UPDATE_BACKREF previously while processing the block.
7335  *
7336  * NOTE: return value 1 means we should stop walking up.
7337  */
7338 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
7339                                  struct btrfs_root *root,
7340                                  struct btrfs_path *path,
7341                                  struct walk_control *wc)
7342 {
7343         int ret;
7344         int level = wc->level;
7345         struct extent_buffer *eb = path->nodes[level];
7346         u64 parent = 0;
7347
7348         if (wc->stage == UPDATE_BACKREF) {
7349                 BUG_ON(wc->shared_level < level);
7350                 if (level < wc->shared_level)
7351                         goto out;
7352
7353                 ret = find_next_key(path, level + 1, &wc->update_progress);
7354                 if (ret > 0)
7355                         wc->update_ref = 0;
7356
7357                 wc->stage = DROP_REFERENCE;
7358                 wc->shared_level = -1;
7359                 path->slots[level] = 0;
7360
7361                 /*
7362                  * check reference count again if the block isn't locked.
7363                  * we should start walking down the tree again if reference
7364                  * count is one.
7365                  */
7366                 if (!path->locks[level]) {
7367                         BUG_ON(level == 0);
7368                         btrfs_tree_lock(eb);
7369                         btrfs_set_lock_blocking(eb);
7370                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7371
7372                         ret = btrfs_lookup_extent_info(trans, root,
7373                                                        eb->start, level, 1,
7374                                                        &wc->refs[level],
7375                                                        &wc->flags[level]);
7376                         if (ret < 0) {
7377                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7378                                 path->locks[level] = 0;
7379                                 return ret;
7380                         }
7381                         BUG_ON(wc->refs[level] == 0);
7382                         if (wc->refs[level] == 1) {
7383                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7384                                 path->locks[level] = 0;
7385                                 return 1;
7386                         }
7387                 }
7388         }
7389
7390         /* wc->stage == DROP_REFERENCE */
7391         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
7392
7393         if (wc->refs[level] == 1) {
7394                 if (level == 0) {
7395                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7396                                 ret = btrfs_dec_ref(trans, root, eb, 1,
7397                                                     wc->for_reloc);
7398                         else
7399                                 ret = btrfs_dec_ref(trans, root, eb, 0,
7400                                                     wc->for_reloc);
7401                         BUG_ON(ret); /* -ENOMEM */
7402                 }
7403                 /* make block locked assertion in clean_tree_block happy */
7404                 if (!path->locks[level] &&
7405                     btrfs_header_generation(eb) == trans->transid) {
7406                         btrfs_tree_lock(eb);
7407                         btrfs_set_lock_blocking(eb);
7408                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7409                 }
7410                 clean_tree_block(trans, root, eb);
7411         }
7412
7413         if (eb == root->node) {
7414                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7415                         parent = eb->start;
7416                 else
7417                         BUG_ON(root->root_key.objectid !=
7418                                btrfs_header_owner(eb));
7419         } else {
7420                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
7421                         parent = path->nodes[level + 1]->start;
7422                 else
7423                         BUG_ON(root->root_key.objectid !=
7424                                btrfs_header_owner(path->nodes[level + 1]));
7425         }
7426
7427         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
7428 out:
7429         wc->refs[level] = 0;
7430         wc->flags[level] = 0;
7431         return 0;
7432 }
7433
7434 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
7435                                    struct btrfs_root *root,
7436                                    struct btrfs_path *path,
7437                                    struct walk_control *wc)
7438 {
7439         int level = wc->level;
7440         int lookup_info = 1;
7441         int ret;
7442
7443         while (level >= 0) {
7444                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
7445                 if (ret > 0)
7446                         break;
7447
7448                 if (level == 0)
7449                         break;
7450
7451                 if (path->slots[level] >=
7452                     btrfs_header_nritems(path->nodes[level]))
7453                         break;
7454
7455                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
7456                 if (ret > 0) {
7457                         path->slots[level]++;
7458                         continue;
7459                 } else if (ret < 0)
7460                         return ret;
7461                 level = wc->level;
7462         }
7463         return 0;
7464 }
7465
7466 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
7467                                  struct btrfs_root *root,
7468                                  struct btrfs_path *path,
7469                                  struct walk_control *wc, int max_level)
7470 {
7471         int level = wc->level;
7472         int ret;
7473
7474         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
7475         while (level < max_level && path->nodes[level]) {
7476                 wc->level = level;
7477                 if (path->slots[level] + 1 <
7478                     btrfs_header_nritems(path->nodes[level])) {
7479                         path->slots[level]++;
7480                         return 0;
7481                 } else {
7482                         ret = walk_up_proc(trans, root, path, wc);
7483                         if (ret > 0)
7484                                 return 0;
7485
7486                         if (path->locks[level]) {
7487                                 btrfs_tree_unlock_rw(path->nodes[level],
7488                                                      path->locks[level]);
7489                                 path->locks[level] = 0;
7490                         }
7491                         free_extent_buffer(path->nodes[level]);
7492                         path->nodes[level] = NULL;
7493                         level++;
7494                 }
7495         }
7496         return 1;
7497 }
7498
7499 /*
7500  * drop a subvolume tree.
7501  *
7502  * this function traverses the tree freeing any blocks that are only
7503  * referenced by the tree.
7504  *
7505  * when a shared tree block is found, this function decreases its
7506  * reference count by one. if update_ref is true, this function
7507  * also makes sure backrefs for the shared block and all lower level
7508  * blocks are properly updated.
7509  *
7510  * If called with for_reloc == 0, may exit early with -EAGAIN
7511  */
7512 int btrfs_drop_snapshot(struct btrfs_root *root,
7513                          struct btrfs_block_rsv *block_rsv, int update_ref,
7514                          int for_reloc)
7515 {
7516         struct btrfs_path *path;
7517         struct btrfs_trans_handle *trans;
7518         struct btrfs_root *tree_root = root->fs_info->tree_root;
7519         struct btrfs_root_item *root_item = &root->root_item;
7520         struct walk_control *wc;
7521         struct btrfs_key key;
7522         int err = 0;
7523         int ret;
7524         int level;
7525         bool root_dropped = false;
7526
7527         path = btrfs_alloc_path();
7528         if (!path) {
7529                 err = -ENOMEM;
7530                 goto out;
7531         }
7532
7533         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7534         if (!wc) {
7535                 btrfs_free_path(path);
7536                 err = -ENOMEM;
7537                 goto out;
7538         }
7539
7540         trans = btrfs_start_transaction(tree_root, 0);
7541         if (IS_ERR(trans)) {
7542                 err = PTR_ERR(trans);
7543                 goto out_free;
7544         }
7545
7546         if (block_rsv)
7547                 trans->block_rsv = block_rsv;
7548
7549         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
7550                 level = btrfs_header_level(root->node);
7551                 path->nodes[level] = btrfs_lock_root_node(root);
7552                 btrfs_set_lock_blocking(path->nodes[level]);
7553                 path->slots[level] = 0;
7554                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7555                 memset(&wc->update_progress, 0,
7556                        sizeof(wc->update_progress));
7557         } else {
7558                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
7559                 memcpy(&wc->update_progress, &key,
7560                        sizeof(wc->update_progress));
7561
7562                 level = root_item->drop_level;
7563                 BUG_ON(level == 0);
7564                 path->lowest_level = level;
7565                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7566                 path->lowest_level = 0;
7567                 if (ret < 0) {
7568                         err = ret;
7569                         goto out_end_trans;
7570                 }
7571                 WARN_ON(ret > 0);
7572
7573                 /*
7574                  * unlock our path, this is safe because only this
7575                  * function is allowed to delete this snapshot
7576                  */
7577                 btrfs_unlock_up_safe(path, 0);
7578
7579                 level = btrfs_header_level(root->node);
7580                 while (1) {
7581                         btrfs_tree_lock(path->nodes[level]);
7582                         btrfs_set_lock_blocking(path->nodes[level]);
7583                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7584
7585                         ret = btrfs_lookup_extent_info(trans, root,
7586                                                 path->nodes[level]->start,
7587                                                 level, 1, &wc->refs[level],
7588                                                 &wc->flags[level]);
7589                         if (ret < 0) {
7590                                 err = ret;
7591                                 goto out_end_trans;
7592                         }
7593                         BUG_ON(wc->refs[level] == 0);
7594
7595                         if (level == root_item->drop_level)
7596                                 break;
7597
7598                         btrfs_tree_unlock(path->nodes[level]);
7599                         path->locks[level] = 0;
7600                         WARN_ON(wc->refs[level] != 1);
7601                         level--;
7602                 }
7603         }
7604
7605         wc->level = level;
7606         wc->shared_level = -1;
7607         wc->stage = DROP_REFERENCE;
7608         wc->update_ref = update_ref;
7609         wc->keep_locks = 0;
7610         wc->for_reloc = for_reloc;
7611         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7612
7613         while (1) {
7615                 ret = walk_down_tree(trans, root, path, wc);
7616                 if (ret < 0) {
7617                         err = ret;
7618                         break;
7619                 }
7620
7621                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
7622                 if (ret < 0) {
7623                         err = ret;
7624                         break;
7625                 }
7626
7627                 if (ret > 0) {
7628                         BUG_ON(wc->stage != DROP_REFERENCE);
7629                         break;
7630                 }
7631
7632                 if (wc->stage == DROP_REFERENCE) {
7633                         level = wc->level;
7634                         btrfs_node_key(path->nodes[level],
7635                                        &root_item->drop_progress,
7636                                        path->slots[level]);
7637                         root_item->drop_level = level;
7638                 }
7639
7640                 BUG_ON(wc->level == 0);
7641                 if (btrfs_should_end_transaction(trans, tree_root) ||
7642                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
7643                         ret = btrfs_update_root(trans, tree_root,
7644                                                 &root->root_key,
7645                                                 root_item);
7646                         if (ret) {
7647                                 btrfs_abort_transaction(trans, tree_root, ret);
7648                                 err = ret;
7649                                 goto out_end_trans;
7650                         }
7651
7652                         btrfs_end_transaction_throttle(trans, tree_root);
7653                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
7654                                 pr_debug("btrfs: drop snapshot early exit\n");
7655                                 err = -EAGAIN;
7656                                 goto out_free;
7657                         }
7658
7659                         trans = btrfs_start_transaction(tree_root, 0);
7660                         if (IS_ERR(trans)) {
7661                                 err = PTR_ERR(trans);
7662                                 goto out_free;
7663                         }
7664                         if (block_rsv)
7665                                 trans->block_rsv = block_rsv;
7666                 }
7667         }
7668         btrfs_release_path(path);
7669         if (err)
7670                 goto out_end_trans;
7671
7672         ret = btrfs_del_root(trans, tree_root, &root->root_key);
7673         if (ret) {
7674                 btrfs_abort_transaction(trans, tree_root, ret);
7675                 goto out_end_trans;
7676         }
7677
7678         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
7679                 ret = btrfs_find_root(tree_root, &root->root_key, path,
7680                                       NULL, NULL);
7681                 if (ret < 0) {
7682                         btrfs_abort_transaction(trans, tree_root, ret);
7683                         err = ret;
7684                         goto out_end_trans;
7685                 } else if (ret > 0) {
7686                         /* if we fail to delete the orphan item this time
7687                          * around, it'll get picked up the next time.
7688                          *
7689                          * The most common failure here is just -ENOENT.
7690                          */
7691                         btrfs_del_orphan_item(trans, tree_root,
7692                                               root->root_key.objectid);
7693                 }
7694         }
7695
7696         if (root->in_radix) {
7697                 btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
7698         } else {
7699                 free_extent_buffer(root->node);
7700                 free_extent_buffer(root->commit_root);
7701                 btrfs_put_fs_root(root);
7702         }
7703         root_dropped = true;
7704 out_end_trans:
7705         btrfs_end_transaction_throttle(trans, tree_root);
7706 out_free:
7707         kfree(wc);
7708         btrfs_free_path(path);
7709 out:
7710         /*
7711          * So if we need to stop dropping the snapshot for whatever reason we
7712          * need to make sure to add it back to the dead root list so that we
7713          * keep trying to do the work later.  This also cleans up roots if we
7714          * don't have it in the radix (like when we recover after a power fail
7715          * or unmount) so we don't leak memory.
7716          */
7717         if (!for_reloc && !root_dropped)
7718                 btrfs_add_dead_root(root);
7719         if (err)
7720                 btrfs_std_error(root->fs_info, err);
7721         return err;
7722 }
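/*
 * A minimal caller sketch (hypothetical, for illustration): the
 * cleaner is expected to drive this by popping dead roots and calling
 * btrfs_drop_snapshot(root, NULL, 0, 0).  An -EAGAIN return means the
 * root was re-queued on the dead root list above, and the walk simply
 * resumes from drop_progress on a later pass.
 */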
7723
7724 /*
7725  * drop subtree rooted at tree block 'node'.
7726  *
7727  * NOTE: this function will unlock and release tree block 'node'.
7728  * only used by relocation code.
7729  */
7730 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
7731                         struct btrfs_root *root,
7732                         struct extent_buffer *node,
7733                         struct extent_buffer *parent)
7734 {
7735         struct btrfs_path *path;
7736         struct walk_control *wc;
7737         int level;
7738         int parent_level;
7739         int ret = 0;
7740         int wret;
7741
7742         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7743
7744         path = btrfs_alloc_path();
7745         if (!path)
7746                 return -ENOMEM;
7747
7748         wc = kzalloc(sizeof(*wc), GFP_NOFS);
7749         if (!wc) {
7750                 btrfs_free_path(path);
7751                 return -ENOMEM;
7752         }
7753
7754         btrfs_assert_tree_locked(parent);
7755         parent_level = btrfs_header_level(parent);
7756         extent_buffer_get(parent);
7757         path->nodes[parent_level] = parent;
7758         path->slots[parent_level] = btrfs_header_nritems(parent);
7759
7760         btrfs_assert_tree_locked(node);
7761         level = btrfs_header_level(node);
7762         path->nodes[level] = node;
7763         path->slots[level] = 0;
7764         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
7765
7766         wc->refs[parent_level] = 1;
7767         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
7768         wc->level = level;
7769         wc->shared_level = -1;
7770         wc->stage = DROP_REFERENCE;
7771         wc->update_ref = 0;
7772         wc->keep_locks = 1;
7773         wc->for_reloc = 1;
7774         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
7775
7776         while (1) {
7777                 wret = walk_down_tree(trans, root, path, wc);
7778                 if (wret < 0) {
7779                         ret = wret;
7780                         break;
7781                 }
7782
7783                 wret = walk_up_tree(trans, root, path, wc, parent_level);
7784                 if (wret < 0)
7785                         ret = wret;
7786                 if (wret != 0)
7787                         break;
7788         }
7789
7790         kfree(wc);
7791         btrfs_free_path(path);
7792         return ret;
7793 }
7794
7795 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7796 {
7797         u64 num_devices;
7798         u64 stripped;
7799
7800         /*
7801  * if restripe for this chunk_type is on, pick the target profile and
7802  * return it, otherwise do the usual balance
7803          */
7804         stripped = get_restripe_target(root->fs_info, flags);
7805         if (stripped)
7806                 return extended_to_chunk(stripped);
7807
7808         /*
7809          * we add in the count of missing devices because we want
7810          * to make sure that any RAID levels on a degraded FS
7811          * continue to be honored.
7812          */
7813         num_devices = root->fs_info->fs_devices->rw_devices +
7814                 root->fs_info->fs_devices->missing_devices;
7815
7816         stripped = BTRFS_BLOCK_GROUP_RAID0 |
7817                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
7818                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7819
7820         if (num_devices == 1) {
7821                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7822                 stripped = flags & ~stripped;
7823
7824                 /* turn raid0 into single device chunks */
7825                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7826                         return stripped;
7827
7828                 /* turn mirroring into duplication */
7829                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7830                              BTRFS_BLOCK_GROUP_RAID10))
7831                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7832         } else {
7833                 /* they already had raid on here, just return */
7834                 if (flags & stripped)
7835                         return flags;
7836
7837                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7838                 stripped = flags & ~stripped;
7839
7840                 /* switch duplicated blocks with raid1 */
7841                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7842                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7843
7844                 /* this is drive concat, leave it alone */
7845         }
7846
7847         return flags;
7848 }
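/*
 * Worked examples for the profile rewrite above: on a single-device
 * fs (counting missing devices), RAID1/RAID10 degrade to DUP and
 * RAID0 to single; with multiple devices, DUP is promoted to RAID1
 * and existing RAID profiles are returned unchanged.  A restripe
 * target, when set, overrides all of this.
 */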
7849
7850 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
7851 {
7852         struct btrfs_space_info *sinfo = cache->space_info;
7853         u64 num_bytes;
7854         u64 min_allocable_bytes;
7855         int ret = -ENOSPC;
7856
7858         /*
7859          * We need some metadata space and system metadata space for
7860          * allocating chunks in some corner cases, so keep a minimum
7861          * reserve unless we are forced to set the group readonly.
7862          */
7863         if ((sinfo->flags &
7864              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7865             !force)
7866                 min_allocable_bytes = 1 * 1024 * 1024;
7867         else
7868                 min_allocable_bytes = 0;
7869
7870         spin_lock(&sinfo->lock);
7871         spin_lock(&cache->lock);
7872
7873         if (cache->ro) {
7874                 ret = 0;
7875                 goto out;
7876         }
7877
7878         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7879                     cache->bytes_super - btrfs_block_group_used(&cache->item);
7880
7881         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7882             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7883             min_allocable_bytes <= sinfo->total_bytes) {
7884                 sinfo->bytes_readonly += num_bytes;
7885                 cache->ro = 1;
7886                 ret = 0;
7887         }
7888 out:
7889         spin_unlock(&cache->lock);
7890         spin_unlock(&sinfo->lock);
7891         return ret;
7892 }
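/*
 * Illustrative numbers for the check above: a 1GiB metadata block
 * group with 700MiB used (and nothing reserved or pinned) has
 * num_bytes = 324MiB of slack; it may go read-only only if the
 * space_info can still absorb that slack plus the 1MiB
 * min_allocable_bytes reserve within its total_bytes.
 */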
7893
7894 int btrfs_set_block_group_ro(struct btrfs_root *root,
7895                              struct btrfs_block_group_cache *cache)
7897 {
7898         struct btrfs_trans_handle *trans;
7899         u64 alloc_flags;
7900         int ret;
7901
7902         BUG_ON(cache->ro);
7903
7904         trans = btrfs_join_transaction(root);
7905         if (IS_ERR(trans))
7906                 return PTR_ERR(trans);
7907
7908         alloc_flags = update_block_group_flags(root, cache->flags);
7909         if (alloc_flags != cache->flags) {
7910                 ret = do_chunk_alloc(trans, root, alloc_flags,
7911                                      CHUNK_ALLOC_FORCE);
7912                 if (ret < 0)
7913                         goto out;
7914         }
7915
7916         ret = set_block_group_ro(cache, 0);
7917         if (!ret)
7918                 goto out;
7919         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7920         ret = do_chunk_alloc(trans, root, alloc_flags,
7921                              CHUNK_ALLOC_FORCE);
7922         if (ret < 0)
7923                 goto out;
7924         ret = set_block_group_ro(cache, 0);
7925 out:
7926         btrfs_end_transaction(trans, root);
7927         return ret;
7928 }
7929
7930 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7931                             struct btrfs_root *root, u64 type)
7932 {
7933         u64 alloc_flags = get_alloc_profile(root, type);
7934         return do_chunk_alloc(trans, root, alloc_flags,
7935                               CHUNK_ALLOC_FORCE);
7936 }
7937
7938 /*
7939  * helper to account the unused space of all the readonly block groups in the
7940  * list. takes mirrors into account.
7941  */
7942 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7943 {
7944         struct btrfs_block_group_cache *block_group;
7945         u64 free_bytes = 0;
7946         int factor;
7947
7948         list_for_each_entry(block_group, groups_list, list) {
7949                 spin_lock(&block_group->lock);
7950
7951                 if (!block_group->ro) {
7952                         spin_unlock(&block_group->lock);
7953                         continue;
7954                 }
7955
7956                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7957                                           BTRFS_BLOCK_GROUP_RAID10 |
7958                                           BTRFS_BLOCK_GROUP_DUP))
7959                         factor = 2;
7960                 else
7961                         factor = 1;
7962
7963                 free_bytes += (block_group->key.offset -
7964                                btrfs_block_group_used(&block_group->item)) *
7965                                factor;
7966
7967                 spin_unlock(&block_group->lock);
7968         }
7969
7970         return free_bytes;
7971 }
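/*
 * Example of the factor above: every logical byte in a RAID1, RAID10
 * or DUP group occupies two bytes on disk, so a read-only 1GiB RAID1
 * group with 400MiB used contributes (1GiB - 400MiB) * 2 of raw free
 * space.
 */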
7972
7973 /*
7974  * helper to account the unused space of all the readonly block groups in the
7975  * space_info. takes mirrors into account.
7976  */
7977 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7978 {
7979         int i;
7980         u64 free_bytes = 0;
7981
7982         spin_lock(&sinfo->lock);
7983
7984         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7985                 if (!list_empty(&sinfo->block_groups[i]))
7986                         free_bytes += __btrfs_get_ro_block_group_free_space(
7987                                                 &sinfo->block_groups[i]);
7988
7989         spin_unlock(&sinfo->lock);
7990
7991         return free_bytes;
7992 }
7993
7994 void btrfs_set_block_group_rw(struct btrfs_root *root,
7995                               struct btrfs_block_group_cache *cache)
7996 {
7997         struct btrfs_space_info *sinfo = cache->space_info;
7998         u64 num_bytes;
7999
8000         BUG_ON(!cache->ro);
8001
8002         spin_lock(&sinfo->lock);
8003         spin_lock(&cache->lock);
8004         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8005                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8006         sinfo->bytes_readonly -= num_bytes;
8007         cache->ro = 0;
8008         spin_unlock(&cache->lock);
8009         spin_unlock(&sinfo->lock);
8010 }
8011
8012 /*
8013  * checks to see if it's even possible to relocate this block group.
8014  *
8015  * @return - -1 if it's not a good idea to relocate this block group, 0 if
8016  * it's ok to go ahead and try.
8017  */
8018 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8019 {
8020         struct btrfs_block_group_cache *block_group;
8021         struct btrfs_space_info *space_info;
8022         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8023         struct btrfs_device *device;
8024         struct btrfs_trans_handle *trans;
8025         u64 min_free;
8026         u64 dev_min = 1;
8027         u64 dev_nr = 0;
8028         u64 target;
8029         int index;
8030         int full = 0;
8031         int ret = 0;
8032
8033         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8034
8035         /* odd, couldn't find the block group, leave it alone */
8036         if (!block_group)
8037                 return -1;
8038
8039         min_free = btrfs_block_group_used(&block_group->item);
8040
8041         /* no bytes used, we're good */
8042         if (!min_free)
8043                 goto out;
8044
8045         space_info = block_group->space_info;
8046         spin_lock(&space_info->lock);
8047
8048         full = space_info->full;
8049
8050         /*
8051          * if this is the last block group we have in this space, we can't
8052          * relocate it unless we're able to allocate a new chunk below.
8053          *
8054  * Otherwise, we need to make sure we have room in the space to handle
8055  * all of the extents from this block group.  If we do, we're good.
8056          */
8057         if ((space_info->total_bytes != block_group->key.offset) &&
8058             (space_info->bytes_used + space_info->bytes_reserved +
8059              space_info->bytes_pinned + space_info->bytes_readonly +
8060              min_free < space_info->total_bytes)) {
8061                 spin_unlock(&space_info->lock);
8062                 goto out;
8063         }
8064         spin_unlock(&space_info->lock);
8065
8066         /*
8067  * OK, we don't have enough space, but maybe we have free space on our
8068  * devices to allocate new chunks for relocation, so loop through our
8069  * alloc devices and guess if we have enough space.  If this block
8070          * group is going to be restriped, run checks against the target
8071          * profile instead of the current one.
8072          */
8073         ret = -1;
8074
8075         /*
8076          * index:
8077          *      0: raid10
8078          *      1: raid1
8079          *      2: dup
8080          *      3: raid0
8081          *      4: single
8082          */
8083         target = get_restripe_target(root->fs_info, block_group->flags);
8084         if (target) {
8085                 index = __get_raid_index(extended_to_chunk(target));
8086         } else {
8087                 /*
8088                  * this is just a balance, so if we were marked as full
8089                  * we know there is no space for a new chunk
8090                  */
8091                 if (full)
8092                         goto out;
8093
8094                 index = get_block_group_index(block_group);
8095         }
8096
8097         if (index == BTRFS_RAID_RAID10) {
8098                 dev_min = 4;
8099                 /* Divide by 2 */
8100                 min_free >>= 1;
8101         } else if (index == BTRFS_RAID_RAID1) {
8102                 dev_min = 2;
8103         } else if (index == BTRFS_RAID_DUP) {
8104                 /* Multiply by 2 */
8105                 min_free <<= 1;
8106         } else if (index == BTRFS_RAID_RAID0) {
8107                 dev_min = fs_devices->rw_devices;
8108                 do_div(min_free, dev_min);
8109         }
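        /*
         * Illustrative numbers: relocating a RAID10 group with 1GiB used
         * means finding at least 512MiB of free dev extent space (min_free
         * was halved above) on each of four writable devices.  DUP keeps
         * both copies on one device, so min_free doubles instead.
         */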
8110
8111         /* We need to do this so that we can look at pending chunks */
8112         trans = btrfs_join_transaction(root);
8113         if (IS_ERR(trans)) {
8114                 ret = PTR_ERR(trans);
8115                 goto out;
8116         }
8117
8118         mutex_lock(&root->fs_info->chunk_mutex);
8119         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8120                 u64 dev_offset;
8121
8122                 /*
8123                  * check to make sure we can actually find a chunk with enough
8124                  * space to fit our block group in.
8125                  */
8126                 if (device->total_bytes > device->bytes_used + min_free &&
8127                     !device->is_tgtdev_for_dev_replace) {
8128                         ret = find_free_dev_extent(trans, device, min_free,
8129                                                    &dev_offset, NULL);
8130                         if (!ret)
8131                                 dev_nr++;
8132
8133                         if (dev_nr >= dev_min)
8134                                 break;
8135
8136                         ret = -1;
8137                 }
8138         }
8139         mutex_unlock(&root->fs_info->chunk_mutex);
8140         btrfs_end_transaction(trans, root);
8141 out:
8142         btrfs_put_block_group(block_group);
8143         return ret;
8144 }
8145
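/*
 * Position @path at the first BLOCK_GROUP_ITEM whose objectid is >=
 * key->objectid.  Returns 0 when one is found, a positive value when
 * the tree has no more candidates, and a negative errno on error.
 */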
8146 static int find_first_block_group(struct btrfs_root *root,
8147                 struct btrfs_path *path, struct btrfs_key *key)
8148 {
8149         int ret = 0;
8150         struct btrfs_key found_key;
8151         struct extent_buffer *leaf;
8152         int slot;
8153
8154         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8155         if (ret < 0)
8156                 goto out;
8157
8158         while (1) {
8159                 slot = path->slots[0];
8160                 leaf = path->nodes[0];
8161                 if (slot >= btrfs_header_nritems(leaf)) {
8162                         ret = btrfs_next_leaf(root, path);
8163                         if (ret == 0)
8164                                 continue;
8165                         if (ret < 0)
8166                                 goto out;
8167                         break;
8168                 }
8169                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8170
8171                 if (found_key.objectid >= key->objectid &&
8172                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8173                         ret = 0;
8174                         goto out;
8175                 }
8176                 path->slots[0]++;
8177         }
8178 out:
8179         return ret;
8180 }
8181
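/*
 * Drop the inode reference each block group may hold on its free space
 * cache inode.  Called during unmount so the cache inodes can be
 * released before the block groups themselves go away.
 */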
8182 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8183 {
8184         struct btrfs_block_group_cache *block_group;
8185         u64 last = 0;
8186
8187         while (1) {
8188                 struct inode *inode;
8189
8190                 block_group = btrfs_lookup_first_block_group(info, last);
8191                 while (block_group) {
8192                         spin_lock(&block_group->lock);
8193                         if (block_group->iref)
8194                                 break;
8195                         spin_unlock(&block_group->lock);
8196                         block_group = next_block_group(info->tree_root,
8197                                                        block_group);
8198                 }
8199                 if (!block_group) {
8200                         if (last == 0)
8201                                 break;
8202                         last = 0;
8203                         continue;
8204                 }
8205
8206                 inode = block_group->inode;
8207                 block_group->iref = 0;
8208                 block_group->inode = NULL;
8209                 spin_unlock(&block_group->lock);
8210                 iput(inode);
8211                 last = block_group->key.objectid + block_group->key.offset;
8212                 btrfs_put_block_group(block_group);
8213         }
8214 }
8215
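/*
 * Final teardown of the block group cache: drop any caching controls
 * still queued, unlink and release every cached block group, then free
 * the space_info structures once nothing can be using them.
 */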
8216 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8217 {
8218         struct btrfs_block_group_cache *block_group;
8219         struct btrfs_space_info *space_info;
8220         struct btrfs_caching_control *caching_ctl;
8221         struct rb_node *n;
8222
8223         down_write(&info->extent_commit_sem);
8224         while (!list_empty(&info->caching_block_groups)) {
8225                 caching_ctl = list_entry(info->caching_block_groups.next,
8226                                          struct btrfs_caching_control, list);
8227                 list_del(&caching_ctl->list);
8228                 put_caching_control(caching_ctl);
8229         }
8230         up_write(&info->extent_commit_sem);
8231
8232         spin_lock(&info->block_group_cache_lock);
8233         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8234                 block_group = rb_entry(n, struct btrfs_block_group_cache,
8235                                        cache_node);
8236                 rb_erase(&block_group->cache_node,
8237                          &info->block_group_cache_tree);
8238                 spin_unlock(&info->block_group_cache_lock);
8239
8240                 down_write(&block_group->space_info->groups_sem);
8241                 list_del(&block_group->list);
8242                 up_write(&block_group->space_info->groups_sem);
8243
8244                 if (block_group->cached == BTRFS_CACHE_STARTED)
8245                         wait_block_group_cache_done(block_group);
8246
8247                 /*
8248                  * We haven't cached this block group, which means we may
8249                  * still have excluded extents on this block group.
8250                  */
8251                 if (block_group->cached == BTRFS_CACHE_NO ||
8252                     block_group->cached == BTRFS_CACHE_ERROR)
8253                         free_excluded_extents(info->extent_root, block_group);
8254
8255                 btrfs_remove_free_space_cache(block_group);
8256                 btrfs_put_block_group(block_group);
8257
8258                 spin_lock(&info->block_group_cache_lock);
8259         }
8260         spin_unlock(&info->block_group_cache_lock);
8261
8262         /* now that all the block groups are freed, go through and
8263          * free all the space_info structs.  This is only called during
8264          * the final stages of unmount, and so we know nobody is
8265          * using them.  We call synchronize_rcu() once before we start,
8266          * just to be on the safe side.
8267          */
8268         synchronize_rcu();
8269
8270         release_global_block_rsv(info);
8271
8272         while (!list_empty(&info->space_info)) {
8273                 space_info = list_entry(info->space_info.next,
8274                                         struct btrfs_space_info,
8275                                         list);
8276                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
8277                         if (space_info->bytes_pinned > 0 ||
8278                             space_info->bytes_reserved > 0 ||
8279                             space_info->bytes_may_use > 0) {
8280                                 WARN_ON(1);
8281                                 dump_space_info(space_info, 0, 0);
8282                         }
8283                 }
8284                 percpu_counter_destroy(&space_info->total_bytes_pinned);
8285                 list_del(&space_info->list);
8286                 kfree(space_info);
8287         }
8288         return 0;
8289 }
8290
8291 static void __link_block_group(struct btrfs_space_info *space_info,
8292                                struct btrfs_block_group_cache *cache)
8293 {
8294         int index = get_block_group_index(cache);
8295
8296         down_write(&space_info->groups_sem);
8297         list_add_tail(&cache->list, &space_info->block_groups[index]);
8298         up_write(&space_info->groups_sem);
8299 }
8300
8301 int btrfs_read_block_groups(struct btrfs_root *root)
8302 {
8303         struct btrfs_path *path;
8304         int ret;
8305         struct btrfs_block_group_cache *cache;
8306         struct btrfs_fs_info *info = root->fs_info;
8307         struct btrfs_space_info *space_info;
8308         struct btrfs_key key;
8309         struct btrfs_key found_key;
8310         struct extent_buffer *leaf;
8311         int need_clear = 0;
8312         u64 cache_gen;
8313
8314         root = info->extent_root;
8315         key.objectid = 0;
8316         key.offset = 0;
8317         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8318         path = btrfs_alloc_path();
8319         if (!path)
8320                 return -ENOMEM;
8321         path->reada = 1;
8322
8323         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8324         if (btrfs_test_opt(root, SPACE_CACHE) &&
8325             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8326                 need_clear = 1;
8327         if (btrfs_test_opt(root, CLEAR_CACHE))
8328                 need_clear = 1;
8329
8330         while (1) {
8331                 ret = find_first_block_group(root, path, &key);
8332                 if (ret > 0)
8333                         break;
8334                 if (ret != 0)
8335                         goto error;
8336                 leaf = path->nodes[0];
8337                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8338                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8339                 if (!cache) {
8340                         ret = -ENOMEM;
8341                         goto error;
8342                 }
8343                 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8344                                                 GFP_NOFS);
8345                 if (!cache->free_space_ctl) {
8346                         kfree(cache);
8347                         ret = -ENOMEM;
8348                         goto error;
8349                 }
8350
8351                 atomic_set(&cache->count, 1);
8352                 spin_lock_init(&cache->lock);
8353                 cache->fs_info = info;
8354                 INIT_LIST_HEAD(&cache->list);
8355                 INIT_LIST_HEAD(&cache->cluster_list);
8356
8357                 if (need_clear) {
8358                         /*
8359                          * When we mount with an old space cache, we need to
8360                          * set BTRFS_DC_CLEAR and set the dirty flag.
8361                          *
8362                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
8363                          *    truncate the old free space cache inode and
8364                          *    set up a new one.
8365                          * b) Setting the 'dirty' flag makes sure that we flush
8366                          *    the new space cache info onto disk.
8367                          */
8368                         cache->disk_cache_state = BTRFS_DC_CLEAR;
8369                         if (btrfs_test_opt(root, SPACE_CACHE))
8370                                 cache->dirty = 1;
8371                 }
8372
8373                 read_extent_buffer(leaf, &cache->item,
8374                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
8375                                    sizeof(cache->item));
8376                 memcpy(&cache->key, &found_key, sizeof(found_key));
8377
8378                 key.objectid = found_key.objectid + found_key.offset;
8379                 btrfs_release_path(path);
8380                 cache->flags = btrfs_block_group_flags(&cache->item);
8381                 cache->sectorsize = root->sectorsize;
8382                 cache->full_stripe_len = btrfs_full_stripe_len(root,
8383                                                &root->fs_info->mapping_tree,
8384                                                found_key.objectid);
8385                 btrfs_init_free_space_ctl(cache);
8386
8387                 /*
8388                  * We need to exclude the super stripes now so that the space
8389                  * info has super bytes accounted for; otherwise we'll think
8390                  * we have more space than we actually do.
8391                  */
8392                 ret = exclude_super_stripes(root, cache);
8393                 if (ret) {
8394                         /*
8395                          * We may have excluded something, so call this just in
8396                          * case.
8397                          */
8398                         free_excluded_extents(root, cache);
8399                         kfree(cache->free_space_ctl);
8400                         kfree(cache);
8401                         goto error;
8402                 }
8403
8404                 /*
8405                  * Check for two cases: either we are full, and therefore
8406                  * don't need to bother with the caching work since we won't
8407                  * find any space, or we are empty, and we can just add all
8408                  * the space in and be done with it.  This saves us a lot of
8409                  * time, particularly in the full case.
8410                  */
8411                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8412                         cache->last_byte_to_unpin = (u64)-1;
8413                         cache->cached = BTRFS_CACHE_FINISHED;
8414                         free_excluded_extents(root, cache);
8415                 } else if (btrfs_block_group_used(&cache->item) == 0) {
8416                         cache->last_byte_to_unpin = (u64)-1;
8417                         cache->cached = BTRFS_CACHE_FINISHED;
8418                         add_new_free_space(cache, root->fs_info,
8419                                            found_key.objectid,
8420                                            found_key.objectid +
8421                                            found_key.offset);
8422                         free_excluded_extents(root, cache);
8423                 }
8424
8425                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8426                 if (ret) {
8427                         btrfs_remove_free_space_cache(cache);
8428                         btrfs_put_block_group(cache);
8429                         goto error;
8430                 }
8431
8432                 ret = update_space_info(info, cache->flags, found_key.offset,
8433                                         btrfs_block_group_used(&cache->item),
8434                                         &space_info);
8435                 if (ret) {
8436                         btrfs_remove_free_space_cache(cache);
8437                         spin_lock(&info->block_group_cache_lock);
8438                         rb_erase(&cache->cache_node,
8439                                  &info->block_group_cache_tree);
8440                         spin_unlock(&info->block_group_cache_lock);
8441                         btrfs_put_block_group(cache);
8442                         goto error;
8443                 }
8444
8445                 cache->space_info = space_info;
8446                 spin_lock(&cache->space_info->lock);
8447                 cache->space_info->bytes_readonly += cache->bytes_super;
8448                 spin_unlock(&cache->space_info->lock);
8449
8450                 __link_block_group(space_info, cache);
8451
8452                 set_avail_alloc_bits(root->fs_info, cache->flags);
8453                 if (btrfs_chunk_readonly(root, cache->key.objectid))
8454                         set_block_group_ro(cache, 1);
8455         }
8456
8457         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8458                 if (!(get_alloc_profile(root, space_info->flags) &
8459                       (BTRFS_BLOCK_GROUP_RAID10 |
8460                        BTRFS_BLOCK_GROUP_RAID1 |
8461                        BTRFS_BLOCK_GROUP_RAID5 |
8462                        BTRFS_BLOCK_GROUP_RAID6 |
8463                        BTRFS_BLOCK_GROUP_DUP)))
8464                         continue;
8465                 /*
8466                  * avoid allocating from un-mirrored block groups if there are
8467                  * mirrored block groups.
8468                  */
8469                 list_for_each_entry(cache,
8470                                 &space_info->block_groups[BTRFS_RAID_RAID0],
8471                                 list)
8472                         set_block_group_ro(cache, 1);
8473                 list_for_each_entry(cache,
8474                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
8475                                 list)
8476                         set_block_group_ro(cache, 1);
8477         }
8478
8479         init_global_block_rsv(info);
8480         ret = 0;
8481 error:
8482         btrfs_free_path(path);
8483         return ret;
8484 }
8485
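/*
 * Insert the block group items for the groups created during this
 * transaction.  btrfs_make_block_group() only queues new groups on
 * trans->new_bgs; their items reach the extent tree here, when the
 * transaction is ending and chunk allocation has settled.
 */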
8486 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
8487                                        struct btrfs_root *root)
8488 {
8489         struct btrfs_block_group_cache *block_group, *tmp;
8490         struct btrfs_root *extent_root = root->fs_info->extent_root;
8491         struct btrfs_block_group_item item;
8492         struct btrfs_key key;
8493         int ret = 0;
8494
8495         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
8496                                  new_bg_list) {
8497                 list_del_init(&block_group->new_bg_list);
8498
8499                 if (ret)
8500                         continue;
8501
8502                 spin_lock(&block_group->lock);
8503                 memcpy(&item, &block_group->item, sizeof(item));
8504                 memcpy(&key, &block_group->key, sizeof(key));
8505                 spin_unlock(&block_group->lock);
8506
8507                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
8508                                         sizeof(item));
8509                 if (ret)
8510                         btrfs_abort_transaction(trans, extent_root, ret);
8511                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
8512                                                key.objectid, key.offset);
8513                 if (ret)
8514                         btrfs_abort_transaction(trans, extent_root, ret);
8515         }
8516 }
8517
8518 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8519                            struct btrfs_root *root, u64 bytes_used,
8520                            u64 type, u64 chunk_objectid, u64 chunk_offset,
8521                            u64 size)
8522 {
8523         int ret;
8524         struct btrfs_root *extent_root;
8525         struct btrfs_block_group_cache *cache;
8526
8527         extent_root = root->fs_info->extent_root;
8528
8529         root->fs_info->last_trans_log_full_commit = trans->transid;
8530
8531         cache = kzalloc(sizeof(*cache), GFP_NOFS);
8532         if (!cache)
8533                 return -ENOMEM;
8534         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
8535                                         GFP_NOFS);
8536         if (!cache->free_space_ctl) {
8537                 kfree(cache);
8538                 return -ENOMEM;
8539         }
8540
8541         cache->key.objectid = chunk_offset;
8542         cache->key.offset = size;
8543         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8544         cache->sectorsize = root->sectorsize;
8545         cache->fs_info = root->fs_info;
8546         cache->full_stripe_len = btrfs_full_stripe_len(root,
8547                                                &root->fs_info->mapping_tree,
8548                                                chunk_offset);
8549
8550         atomic_set(&cache->count, 1);
8551         spin_lock_init(&cache->lock);
8552         INIT_LIST_HEAD(&cache->list);
8553         INIT_LIST_HEAD(&cache->cluster_list);
8554         INIT_LIST_HEAD(&cache->new_bg_list);
8555
8556         btrfs_init_free_space_ctl(cache);
8557
8558         btrfs_set_block_group_used(&cache->item, bytes_used);
8559         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8560         cache->flags = type;
8561         btrfs_set_block_group_flags(&cache->item, type);
8562
8563         cache->last_byte_to_unpin = (u64)-1;
8564         cache->cached = BTRFS_CACHE_FINISHED;
8565         ret = exclude_super_stripes(root, cache);
8566         if (ret) {
8567                 /*
8568                  * We may have excluded something, so call this just in
8569                  * case.
8570                  */
8571                 free_excluded_extents(root, cache);
8572                 kfree(cache->free_space_ctl);
8573                 kfree(cache);
8574                 return ret;
8575         }
8576
8577         add_new_free_space(cache, root->fs_info, chunk_offset,
8578                            chunk_offset + size);
8579
8580         free_excluded_extents(root, cache);
8581
8582         ret = btrfs_add_block_group_cache(root->fs_info, cache);
8583         if (ret) {
8584                 btrfs_remove_free_space_cache(cache);
8585                 btrfs_put_block_group(cache);
8586                 return ret;
8587         }
8588
8589         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8590                                 &cache->space_info);
8591         if (ret) {
8592                 btrfs_remove_free_space_cache(cache);
8593                 spin_lock(&root->fs_info->block_group_cache_lock);
8594                 rb_erase(&cache->cache_node,
8595                          &root->fs_info->block_group_cache_tree);
8596                 spin_unlock(&root->fs_info->block_group_cache_lock);
8597                 btrfs_put_block_group(cache);
8598                 return ret;
8599         }
8600         update_global_block_rsv(root->fs_info);
8601
8602         spin_lock(&cache->space_info->lock);
8603         cache->space_info->bytes_readonly += cache->bytes_super;
8604         spin_unlock(&cache->space_info->lock);
8605
8606         __link_block_group(cache->space_info, cache);
8607
8608         list_add_tail(&cache->new_bg_list, &trans->new_bgs);
8609
8610         set_avail_alloc_bits(extent_root->fs_info, type);
8611
8612         return 0;
8613 }
8614
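/*
 * Called when the last block group of a profile in a space_info goes
 * away: e.g. removing the last RAID1 data block group drops the RAID1
 * bit from avail_data_alloc_bits, so profile calculations stop
 * offering it until a new chunk of that type is allocated.
 */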
8615 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
8616 {
8617         u64 extra_flags = chunk_to_extended(flags) &
8618                                 BTRFS_EXTENDED_PROFILE_MASK;
8619
8620         write_seqlock(&fs_info->profiles_lock);
8621         if (flags & BTRFS_BLOCK_GROUP_DATA)
8622                 fs_info->avail_data_alloc_bits &= ~extra_flags;
8623         if (flags & BTRFS_BLOCK_GROUP_METADATA)
8624                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
8625         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
8626                 fs_info->avail_system_alloc_bits &= ~extra_flags;
8627         write_sequnlock(&fs_info->profiles_lock);
8628 }
8629
8630 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8631                              struct btrfs_root *root, u64 group_start)
8632 {
8633         struct btrfs_path *path;
8634         struct btrfs_block_group_cache *block_group;
8635         struct btrfs_free_cluster *cluster;
8636         struct btrfs_root *tree_root = root->fs_info->tree_root;
8637         struct btrfs_key key;
8638         struct inode *inode;
8639         int ret;
8640         int index;
8641         int factor;
8642
8643         root = root->fs_info->extent_root;
8644
8645         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8646         BUG_ON(!block_group);
8647         BUG_ON(!block_group->ro);
8648
8649         /*
8650          * Free the reserved super bytes from this block group before
8651          * removing it.
8652          */
8653         free_excluded_extents(root, block_group);
8654
8655         memcpy(&key, &block_group->key, sizeof(key));
8656         index = get_block_group_index(block_group);
8657         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8658                                   BTRFS_BLOCK_GROUP_RAID1 |
8659                                   BTRFS_BLOCK_GROUP_RAID10))
8660                 factor = 2;
8661         else
8662                 factor = 1;
8663
8664         /* make sure this block group isn't part of an allocation cluster */
8665         cluster = &root->fs_info->data_alloc_cluster;
8666         spin_lock(&cluster->refill_lock);
8667         btrfs_return_cluster_to_free_space(block_group, cluster);
8668         spin_unlock(&cluster->refill_lock);
8669
8670         /*
8671          * make sure this block group isn't part of a metadata
8672          * allocation cluster
8673          */
8674         cluster = &root->fs_info->meta_alloc_cluster;
8675         spin_lock(&cluster->refill_lock);
8676         btrfs_return_cluster_to_free_space(block_group, cluster);
8677         spin_unlock(&cluster->refill_lock);
8678
8679         path = btrfs_alloc_path();
8680         if (!path) {
8681                 ret = -ENOMEM;
8682                 goto out;
8683         }
8684
8685         inode = lookup_free_space_inode(tree_root, block_group, path);
8686         if (!IS_ERR(inode)) {
8687                 ret = btrfs_orphan_add(trans, inode);
8688                 if (ret) {
8689                         btrfs_add_delayed_iput(inode);
8690                         goto out;
8691                 }
8692                 clear_nlink(inode);
8693                 /* One for the block group's ref */
8694                 spin_lock(&block_group->lock);
8695                 if (block_group->iref) {
8696                         block_group->iref = 0;
8697                         block_group->inode = NULL;
8698                         spin_unlock(&block_group->lock);
8699                         iput(inode);
8700                 } else {
8701                         spin_unlock(&block_group->lock);
8702                 }
8703                 /* One for our lookup ref */
8704                 btrfs_add_delayed_iput(inode);
8705         }
8706
8707         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8708         key.offset = block_group->key.objectid;
8709         key.type = 0;
8710
8711         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8712         if (ret < 0)
8713                 goto out;
8714         if (ret > 0)
8715                 btrfs_release_path(path);
8716         if (ret == 0) {
8717                 ret = btrfs_del_item(trans, tree_root, path);
8718                 if (ret)
8719                         goto out;
8720                 btrfs_release_path(path);
8721         }
8722
8723         spin_lock(&root->fs_info->block_group_cache_lock);
8724         rb_erase(&block_group->cache_node,
8725                  &root->fs_info->block_group_cache_tree);
8726
8727         if (root->fs_info->first_logical_byte == block_group->key.objectid)
8728                 root->fs_info->first_logical_byte = (u64)-1;
8729         spin_unlock(&root->fs_info->block_group_cache_lock);
8730
8731         down_write(&block_group->space_info->groups_sem);
8732         /*
8733          * we must use list_del_init so people can check to see if they
8734          * are still on the list after taking the semaphore
8735          */
8736         list_del_init(&block_group->list);
8737         if (list_empty(&block_group->space_info->block_groups[index]))
8738                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8739         up_write(&block_group->space_info->groups_sem);
8740
8741         if (block_group->cached == BTRFS_CACHE_STARTED)
8742                 wait_block_group_cache_done(block_group);
8743
8744         btrfs_remove_free_space_cache(block_group);
8745
8746         spin_lock(&block_group->space_info->lock);
8747         block_group->space_info->total_bytes -= block_group->key.offset;
8748         block_group->space_info->bytes_readonly -= block_group->key.offset;
8749         block_group->space_info->disk_total -= block_group->key.offset * factor;
8750         spin_unlock(&block_group->space_info->lock);
8751
8752         memcpy(&key, &block_group->key, sizeof(key));
8753
8754         btrfs_clear_space_info_full(root->fs_info);
8755
8756         btrfs_put_block_group(block_group);
8757         btrfs_put_block_group(block_group);
8758
8759         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8760         if (ret > 0)
8761                 ret = -EIO;
8762         if (ret < 0)
8763                 goto out;
8764
8765         ret = btrfs_del_item(trans, root, path);
8766 out:
8767         btrfs_free_path(path);
8768         return ret;
8769 }
8770
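/*
 * Create the initial empty space_info entries: SYSTEM plus separate
 * METADATA and DATA, or a single mixed METADATA|DATA entry when the
 * MIXED_GROUPS incompat feature is set.
 */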
8771 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
8772 {
8773         struct btrfs_space_info *space_info;
8774         struct btrfs_super_block *disk_super;
8775         u64 features;
8776         u64 flags;
8777         int mixed = 0;
8778         int ret;
8779
8780         disk_super = fs_info->super_copy;
8781         if (!btrfs_super_root(disk_super))
8782                 return 1;
8783
8784         features = btrfs_super_incompat_flags(disk_super);
8785         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
8786                 mixed = 1;
8787
8788         flags = BTRFS_BLOCK_GROUP_SYSTEM;
8789         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8790         if (ret)
8791                 goto out;
8792
8793         if (mixed) {
8794                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
8795                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8796         } else {
8797                 flags = BTRFS_BLOCK_GROUP_METADATA;
8798                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8799                 if (ret)
8800                         goto out;
8801
8802                 flags = BTRFS_BLOCK_GROUP_DATA;
8803                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8804         }
8805 out:
8806         return ret;
8807 }
8808
8809 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8810 {
8811         return unpin_extent_range(root, start, end);
8812 }
8813
8814 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8815                                u64 num_bytes, u64 *actual_bytes)
8816 {
8817         return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
8818 }
8819
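/*
 * Backs the FITRIM ioctl.  Illustrative userspace call (the fd and the
 * values are placeholders, not part of this file):
 *
 *	struct fstrim_range range = {
 *		.start = 0, .len = ULLONG_MAX, .minlen = 0,
 *	};
 *	ioctl(fd, FITRIM, &range);
 *
 * On return range.len is overwritten below with the number of bytes
 * actually trimmed.
 */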
8820 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
8821 {
8822         struct btrfs_fs_info *fs_info = root->fs_info;
8823         struct btrfs_block_group_cache *cache = NULL;
8824         u64 group_trimmed;
8825         u64 start;
8826         u64 end;
8827         u64 trimmed = 0;
8828         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8829         int ret = 0;
8830
8831         /*
8832          * try to trim all FS space; our block groups may start from non-zero.
8833          */
8834         if (range->len == total_bytes)
8835                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8836         else
8837                 cache = btrfs_lookup_block_group(fs_info, range->start);
8838
8839         while (cache) {
8840                 if (cache->key.objectid >= (range->start + range->len)) {
8841                         btrfs_put_block_group(cache);
8842                         break;
8843                 }
8844
8845                 start = max(range->start, cache->key.objectid);
8846                 end = min(range->start + range->len,
8847                                 cache->key.objectid + cache->key.offset);
8848
8849                 if (end - start >= range->minlen) {
8850                         if (!block_group_cache_done(cache)) {
8851                                 ret = cache_block_group(cache, 0);
8852                                 if (ret) {
8853                                         btrfs_put_block_group(cache);
8854                                         break;
8855                                 }
8856                                 ret = wait_block_group_cache_done(cache);
8857                                 if (ret) {
8858                                         btrfs_put_block_group(cache);
8859                                         break;
8860                                 }
8861                         }
8862                         ret = btrfs_trim_block_group(cache,
8863                                                      &group_trimmed,
8864                                                      start,
8865                                                      end,
8866                                                      range->minlen);
8867
8868                         trimmed += group_trimmed;
8869                         if (ret) {
8870                                 btrfs_put_block_group(cache);
8871                                 break;
8872                         }
8873                 }
8874
8875                 cache = next_block_group(fs_info->tree_root, cache);
8876         }
8877
8878         range->len = trimmed;
8879         return ret;
8880 }