/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

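/*
 * Editor's sketch (not part of the original file): the life cycle of a
 * delayed ref, using only functions defined below; the caller context and
 * error handling are hypothetical simplifications.
 *
 *	// while modifying a tree block, queue the backref update:
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *					 parent, ref_root, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL, 0);
 *
 *	// later, when the transaction runs its delayed refs:
 *	head = btrfs_select_ref_head(trans);	// pick an unprocessed head
 *	// ... apply the head's accumulated refs to the extent tree
 */
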
/*
 * compare two delayed tree backrefs with same bytenr and type.
 * Note the reversed parameter naming: this returns -1 when ref1 sorts
 * before ref2, 1 when it sorts after, and 0 when they are equal.
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * the type of the delayed backref and the content of the backref.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns the existing entry if
 * an equal ref is already present, or NULL if the new node was inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/* insert a new head ref into the head ref rbtree, keyed by bytenr */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found, wrapping around to the first entry in the tree when the
 * search key is past the last head.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      struct btrfs_delayed_ref_head **last, int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;
	int cmp = 0;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
		if (last)
			*last = entry;

		if (bytenr < entry->node.bytenr)
			cmp = -1;
		else if (bytenr > entry->node.bytenr)
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			if (last)
				*last = entry;
			return entry;
		}
		return entry;
	}
	return NULL;
}

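/*
 * Editor's example (not in the original file): with heads at bytenrs 4096
 * and 8192, find_ref_head(root, 6144, NULL, 0) returns NULL, while
 * find_ref_head(root, 6144, NULL, 1) returns the 8192 head and
 * find_ref_head(root, 9000, NULL, 1) wraps around to the 4096 head.
 * btrfs_select_ref_head() relies on this wraparound to resume scanning
 * from run_delayed_start.
 */
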
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

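/*
 * Editor's sketch (hypothetical caller, not in the original file): -EAGAIN
 * above means the head left the tree while we slept on its mutex, so
 * callers typically select another head and retry under the spinlock:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_select_ref_head(trans);
 *	if (head && btrfs_delayed_ref_lock(trans, head) == -EAGAIN)
 *		head = NULL;		// pick a different head and retry
 *	spin_unlock(&delayed_refs->lock);
 */
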
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

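/*
 * Editor's example (not in the original file): how merge_ref() folds two
 * comparable refs together through ref_mod arithmetic.  Suppose one head
 * holds a BTRFS_ADD_DELAYED_REF and a BTRFS_DROP_DELAYED_REF for the same
 * tree block, each with ref_mod == 1:
 *
 *	mod = -next->ref_mod;		// actions differ, so mod == -1
 *	drop_delayed_ref(..., next);	// the drop ref is removed
 *	ref->ref_mod += mod;		// 1 + (-1) == 0
 *	drop_delayed_ref(..., ref);	// net zero: nothing left to run
 */
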
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, NULL, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

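/*
 * Editor's note with a worked example (not in the original file):
 * run_delayed_start makes head selection round-robin.  With heads at
 * bytenrs 4096, 8192 and 12288, selecting the 8192 head sets
 * run_delayed_start just past its extent, so the next call picks 12288,
 * and the call after that wraps back to 4096 through find_ref_head()'s
 * return_bigger behaviour.
 */
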
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because the head could be
	 * getting processed concurrently; for refs we just added we know
	 * we're fine.
	 */
	spin_lock(&existing_ref->lock);
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

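/*
 * Editor's example (not in the original file): a head's ref_mod is the
 * running sum of the count_mod values merged into it.  Queuing two adds
 * and one drop for the same bytenr gives
 *
 *	(+1) + (+1) + (-1) == +1
 *
 * so running the head performs one net reference addition;
 * BTRFS_UPDATE_DELAYED_HEAD contributes 0 to the sum.
 */
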
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

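/*
 * Editor's sketch (hypothetical caller, not in the original file): queuing
 * a net reference add for a tree block we just COWed; buf is assumed to be
 * an extent_buffer and the other values come from the caller's context.
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
 *					 buf->len, parent,
 *					 root->root_key.objectid,
 *					 btrfs_header_level(buf),
 *					 BTRFS_ADD_DELAYED_REF, NULL, 1);
 *	if (ret)
 *		return ret;	// -ENOMEM is the only failure mode here
 */
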
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

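/*
 * Editor's sketch (hypothetical caller, not in the original file): queuing
 * a reference drop for a file extent.  parent == 0 means the ref is keyed
 * by root/objectid/offset (BTRFS_EXTENT_DATA_REF_KEY) rather than by the
 * block that points at it; extent_start, extent_len, root_objectid and
 * file_offset are assumed caller context.
 *
 *	ret = btrfs_add_delayed_data_ref(fs_info, trans, extent_start,
 *					 extent_len, 0, root_objectid,
 *					 btrfs_ino(inode), file_offset,
 *					 BTRFS_DROP_DELAYED_REF, NULL, 0);
 */
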
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}

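/*
 * Editor's sketch (not in the original file): honouring the locking rule
 * stated above.
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	if (head)
 *		... ;			// inspect or lock the head
 *	spin_unlock(&delayed_refs->lock);
 */
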
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
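
/*
 * Editor's sketch (not in the original file): how module setup code is
 * expected to pair the two functions above; in the real tree this is done
 * by the btrfs module init/exit path, simplified here.
 *
 *	ret = btrfs_delayed_ref_init();
 *	if (ret)
 *		return ret;	// any caches already created were torn
 *				// down by btrfs_delayed_ref_exit()
 *	...
 *	btrfs_delayed_ref_exit();	// on module unload
 */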