/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
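
/*
 * Illustrative note (not from the original file): callers that may sleep,
 * e.g. to read a tree block from disk, first convert the path's spinning
 * locks to blocking ones, roughly the way read_block_for_search() below
 * does before it calls read_tree_block():
 *
 *	btrfs_set_path_blocking(p);
 *	tmp = read_tree_block(root, blocknr, blocksize, gen);
 *	btrfs_clear_path_blocking(p, NULL);
 */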

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
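
/*
 * Illustrative sketch (not from the original file) of the usual path
 * lifecycle; the key values are hypothetical:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_ROOT_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	... inspect path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);	(also unlocks and drops references)
 */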

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers
 * held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping version.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);
	rcu_read_unlock();
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is
 * marked dirty and returned locked.  If you modify the block it needs to
 * be marked dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size
 * in bytes the allocator should try to find free next to the block it
 * returns.  This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow, &last_ref);

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 0;
	return 1;
}

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
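
/*
 * Illustrative sketch (not from the original file): any caller about to
 * modify a block belonging to a committed tree cows it first, e.g.:
 *
 *	ret = btrfs_cow_block(trans, root, leaf, parent, slot, &leaf);
 *	if (ret)
 *		return ret;
 *
 * leaf then points at a copy private to this transaction and is safe
 * to modify.
 */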

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
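
/*
 * Worked example (not from the original file), assuming a hypothetical
 * 4096 byte blocksize: blocknr 0 and other 20480 are close because
 * 20480 - (0 + 4096) = 16384 < 32768, while blocknr 0 and other 65536
 * are not, because 65536 - (0 + 4096) = 61440 >= 32768.
 */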

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
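
/*
 * Illustrative note (not from the original file): keys order first by
 * objectid, then type, then offset, so with hypothetical
 * (objectid, type, offset) triples:
 *
 *	(256, 1, 0) < (256, 1, 4096) < (256, 2, 0) < (257, 0, 0)
 */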

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
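
/*
 * Illustrative layout (not from the original file): item headers grow
 * from the front of the leaf while item data grows back from the end,
 * so the free space sits in the middle:
 *
 *	[header][item 0][item 1] ...>  free space  <... [data 1][data 0]
 *	                                           ^ leaf_data_end()
 */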

/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}
		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
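
/*
 * Illustrative note (not from the original file): for a block holding
 * keys A < B < D, bin_search for B returns 0 with *slot at B's index;
 * searching for a missing key C returns 1 with *slot at D's index, i.e.
 * the slot where C would be inserted.
 */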

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);
	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	if (!parent)
		return 1;
	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}

/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;
	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;
	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(root, search, blocksize,
				     btrfs_node_ptr_generation(node, nr));
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}

/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in
 * the tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock(t);
			path->locks[i] = 0;
		}
	}
}

/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}

/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		if (btrfs_buffer_uptodate(tmp, 0)) {
			if (btrfs_buffer_uptodate(tmp, gen)) {
				/*
				 * we found an up to date block without
				 * sleeping, return right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}

/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * be called again
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len)
{
	int ret;
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}

/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0)
		lowest_unlock = 2;

again:
	if (p->search_commit_root) {
		b = root->commit_root;
		extent_buffer_get(b);
		if (!p->skip_locking)
			btrfs_tree_lock(b);
	} else {
		if (p->skip_locking)
			b = btrfs_root_node(root);
		else
			b = btrfs_lock_root_node(root);
	}

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);
		if (level != btrfs_header_level(b))
			WARN_ON(1);
		level = btrfs_header_level(b);

		p->nodes[level] = b;
		if (!p->skip_locking)
			p->locks[level] = 1;

		btrfs_clear_path_blocking(p, NULL);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			unlock_up(p, level, lowest_unlock);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				btrfs_clear_path_blocking(p, NULL);
				err = btrfs_try_spin_lock(b);

				if (!err) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
					btrfs_clear_path_blocking(p, b);
				}
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
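
/*
 * Illustrative sketch (not from the original file) of the two common
 * btrfs_search_slot calling modes:
 *
 *	read-only lookup, no transaction, no cow:
 *		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 *	search for an insertion of ins_len bytes, cowing and splitting
 *	blocks on the way down:
 *		ret = btrfs_search_slot(trans, root, &key, path, ins_len, 1);
 *
 * ret == 0 means the key was found at path->nodes[0]/path->slots[0],
 * ret == 1 means the slot is where the key would be inserted, and
 * ret < 0 is an error.
 */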

/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}

/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
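
/*
 * Illustrative note (not from the original file): with hypothetical
 * neighbouring key offsets 0, 4096 and 8192 in a leaf, rewriting the
 * middle key's offset from 4096 to 6000 keeps the leaf sorted and
 * succeeds, while rewriting it to 9000 would pass its right neighbour
 * and returns -1.
 */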

/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}

/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = 1;
	path->slots[level] = 0;
	return 0;
}

/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}

/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}

/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
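
/*
 * Illustrative sketch (not from the original file): before inserting an
 * item carrying data_size bytes of payload, callers check that both the
 * item header and the payload fit:
 *
 *	if (btrfs_leaf_free_space(root, leaf) <
 *	    data_size + sizeof(struct btrfs_item))
 *		... push or split the leaf first ...
 */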

/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
					KM_USER1);
		}

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}

/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
					KM_USER1);
		}

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);
2493 /* push data from right to left */
2494 copy_extent_buffer(left, right,
2495 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2496 btrfs_item_nr_offset(0),
2497 push_items * sizeof(struct btrfs_item));
2499 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2500 btrfs_item_offset_nr(right, push_items - 1);
2502 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2503 leaf_data_end(root, left) - push_space,
2504 btrfs_leaf_data(right) +
2505 btrfs_item_offset_nr(right, push_items - 1),
2507 old_left_nritems = btrfs_header_nritems(left);
2508 BUG_ON(old_left_nritems == 0);
2510 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2511 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2514 item = btrfs_item_nr(left, i);
2515 if (!left->map_token) {
2516 map_extent_buffer(left, (unsigned long)item,
2517 sizeof(struct btrfs_item),
2518 &left->map_token, &left->kaddr,
2519 &left->map_start, &left->map_len,
2523 ioff = btrfs_item_offset(left, item);
2524 btrfs_set_item_offset(left, item,
2525 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2527 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2528 if (left->map_token) {
2529 unmap_extent_buffer(left, left->map_token, KM_USER1);
2530 left->map_token = NULL;
2533 /* fixup right node */
2534 if (push_items > right_nritems) {
2535 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2540 if (push_items < right_nritems) {
2541 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2542 leaf_data_end(root, right);
2543 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2544 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2545 btrfs_leaf_data(right) +
2546 leaf_data_end(root, right), push_space);
2548 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2549 btrfs_item_nr_offset(push_items),
2550 (btrfs_header_nritems(right) - push_items) *
2551 sizeof(struct btrfs_item));
2553 right_nritems -= push_items;
2554 btrfs_set_header_nritems(right, right_nritems);
2555 push_space = BTRFS_LEAF_DATA_SIZE(root);
2556 for (i = 0; i < right_nritems; i++) {
2557 item = btrfs_item_nr(right, i);
2559 if (!right->map_token) {
2560 map_extent_buffer(right, (unsigned long)item,
2561 sizeof(struct btrfs_item),
2562 &right->map_token, &right->kaddr,
2563 &right->map_start, &right->map_len,
2567 push_space = push_space - btrfs_item_size(right, item);
2568 btrfs_set_item_offset(right, item, push_space);
2570 if (right->map_token) {
2571 unmap_extent_buffer(right, right->map_token, KM_USER1);
2572 right->map_token = NULL;
2575 btrfs_mark_buffer_dirty(left);
2577 btrfs_mark_buffer_dirty(right);
2579 clean_tree_block(trans, root, right);
2581 btrfs_item_key(right, &disk_key, 0);
2582 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2586 /* then fixup the leaf pointer in the path */
2587 if (path->slots[0] < push_items) {
2588 path->slots[0] += old_left_nritems;
2589 btrfs_tree_unlock(path->nodes[0]);
2590 free_extent_buffer(path->nodes[0]);
2591 path->nodes[0] = left;
2592 path->slots[1] -= 1;
2594 btrfs_tree_unlock(left);
2595 free_extent_buffer(left);
2596 path->slots[0] -= push_items;
2598 BUG_ON(path->slots[0] < 0);
2601 btrfs_tree_unlock(left);
2602 free_extent_buffer(left);
2607 * push some data in the path leaf to the left, trying to free up at
2608 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2610 * max_slot can put a limit on how far into the leaf we'll push items. The
2611 * item at 'max_slot' won't be touched. Use (u32)-1 to push all the items.
2614 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2615 *root, struct btrfs_path *path, int min_data_size,
2616 int data_size, int empty, u32 max_slot)
2618 struct extent_buffer *right = path->nodes[0];
2619 struct extent_buffer *left;
2625 slot = path->slots[1];
2628 if (!path->nodes[1])
2631 right_nritems = btrfs_header_nritems(right);
2632 if (right_nritems == 0)
2635 btrfs_assert_tree_locked(path->nodes[1]);
2637 left = read_node_slot(root, path->nodes[1], slot - 1);
2641 btrfs_tree_lock(left);
2642 btrfs_set_lock_blocking(left);
2644 free_space = btrfs_leaf_free_space(root, left);
2645 if (free_space < data_size) {
2650 /* cow and double check */
2651 ret = btrfs_cow_block(trans, root, left,
2652 path->nodes[1], slot - 1, &left);
2654 /* we hit -ENOSPC, but it isn't fatal here */
2659 free_space = btrfs_leaf_free_space(root, left);
2660 if (free_space < data_size) {
2665 return __push_leaf_left(trans, root, path, min_data_size,
2666 empty, left, free_space, right_nritems,
2669 btrfs_tree_unlock(left);
2670 free_extent_buffer(left);
2675 * split the path's leaf in two, making sure there is at least data_size
2676 * available for the resulting leaf level of the path.
2678 * returns 0 if all went well and < 0 on failure.
2680 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2681 struct btrfs_root *root,
2682 struct btrfs_path *path,
2683 struct extent_buffer *l,
2684 struct extent_buffer *right,
2685 int slot, int mid, int nritems)
2692 struct btrfs_disk_key disk_key;
2694 nritems = nritems - mid;
2695 btrfs_set_header_nritems(right, nritems);
2696 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2698 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2699 btrfs_item_nr_offset(mid),
2700 nritems * sizeof(struct btrfs_item));
2702 copy_extent_buffer(right, l,
2703 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2704 data_copy_size, btrfs_leaf_data(l) +
2705 leaf_data_end(root, l), data_copy_size);
2707 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2708 btrfs_item_end_nr(l, mid);
2710 for (i = 0; i < nritems; i++) {
2711 struct btrfs_item *item = btrfs_item_nr(right, i);
2714 if (!right->map_token) {
2715 map_extent_buffer(right, (unsigned long)item,
2716 sizeof(struct btrfs_item),
2717 &right->map_token, &right->kaddr,
2718 &right->map_start, &right->map_len,
2722 ioff = btrfs_item_offset(right, item);
2723 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2726 if (right->map_token) {
2727 unmap_extent_buffer(right, right->map_token, KM_USER1);
2728 right->map_token = NULL;
2731 btrfs_set_header_nritems(l, mid);
2733 btrfs_item_key(right, &disk_key, 0);
2734 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2735 path->slots[1] + 1, 1);
2739 btrfs_mark_buffer_dirty(right);
2740 btrfs_mark_buffer_dirty(l);
2741 BUG_ON(path->slots[0] != slot);
2744 btrfs_tree_unlock(path->nodes[0]);
2745 free_extent_buffer(path->nodes[0]);
2746 path->nodes[0] = right;
2747 path->slots[0] -= mid;
2748 path->slots[1] += 1;
2750 btrfs_tree_unlock(right);
2751 free_extent_buffer(right);
2754 BUG_ON(path->slots[0] < 0);
2760 * double splits happen when we need to insert a big item in the middle
2761 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2762 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2765 * We avoid this by trying to push the items on either side of our target
2766 * into the adjacent leaves. If all goes well we can avoid the double split completely.
2769 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2770 struct btrfs_root *root,
2771 struct btrfs_path *path,
2779 slot = path->slots[0];
2782 * try to push all the items after our slot into the
2785 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2792 nritems = btrfs_header_nritems(path->nodes[0]);
2794 * our goal is to get our slot at the start or end of a leaf. If
2795 * we've done so, we're done.
2797 if (path->slots[0] == 0 || path->slots[0] == nritems)
2800 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2803 /* try to push all the items before our slot into the previous leaf */
2804 slot = path->slots[0];
2805 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
2818 * split the path's leaf in two, making sure there is at least data_size
2819 * available for the resulting leaf level of the path.
2821 * returns 0 if all went well and < 0 on failure.
2823 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2824 struct btrfs_root *root,
2825 struct btrfs_key *ins_key,
2826 struct btrfs_path *path, int data_size,
2829 struct btrfs_disk_key disk_key;
2830 struct extent_buffer *l;
2834 struct extent_buffer *right;
2838 int num_doubles = 0;
2839 int tried_avoid_double = 0;
2842 slot = path->slots[0];
2843 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2844 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2847 /* first try to make some room by pushing left and right */
2849 wret = push_leaf_right(trans, root, path, data_size,
2854 wret = push_leaf_left(trans, root, path, data_size,
2855 data_size, 0, (u32)-1);
2861 /* did the pushes work? */
2862 if (btrfs_leaf_free_space(root, l) >= data_size)
2866 if (!path->nodes[1]) {
2867 ret = insert_new_root(trans, root, path, 1);
2874 slot = path->slots[0];
2875 nritems = btrfs_header_nritems(l);
2876 mid = (nritems + 1) / 2;
2880 leaf_space_used(l, mid, nritems - mid) + data_size >
2881 BTRFS_LEAF_DATA_SIZE(root)) {
2882 if (slot >= nritems) {
2886 if (mid != nritems &&
2887 leaf_space_used(l, mid, nritems - mid) +
2888 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2889 if (data_size && !tried_avoid_double)
2890 goto push_for_double;
2896 if (leaf_space_used(l, 0, mid) + data_size >
2897 BTRFS_LEAF_DATA_SIZE(root)) {
2898 if (!extend && data_size && slot == 0) {
2900 } else if ((extend || !data_size) && slot == 0) {
2904 if (mid != nritems &&
2905 leaf_space_used(l, mid, nritems - mid) +
2906 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2907 if (data_size && !tried_avoid_double)
2908 goto push_for_double;
2916 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2918 btrfs_item_key(l, &disk_key, mid);
2920 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2921 root->root_key.objectid,
2922 &disk_key, 0, l->start, 0);
2924 return PTR_ERR(right);
2926 root_add_used(root, root->leafsize);
2928 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2929 btrfs_set_header_bytenr(right, right->start);
2930 btrfs_set_header_generation(right, trans->transid);
2931 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2932 btrfs_set_header_owner(right, root->root_key.objectid);
2933 btrfs_set_header_level(right, 0);
2934 write_extent_buffer(right, root->fs_info->fsid,
2935 (unsigned long)btrfs_header_fsid(right),
2938 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2939 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2944 btrfs_set_header_nritems(right, 0);
2945 wret = insert_ptr(trans, root, path,
2946 &disk_key, right->start,
2947 path->slots[1] + 1, 1);
2951 btrfs_tree_unlock(path->nodes[0]);
2952 free_extent_buffer(path->nodes[0]);
2953 path->nodes[0] = right;
2955 path->slots[1] += 1;
2957 btrfs_set_header_nritems(right, 0);
2958 wret = insert_ptr(trans, root, path,
2964 btrfs_tree_unlock(path->nodes[0]);
2965 free_extent_buffer(path->nodes[0]);
2966 path->nodes[0] = right;
2968 if (path->slots[1] == 0) {
2969 wret = fixup_low_keys(trans, root,
2970 path, &disk_key, 1);
2975 btrfs_mark_buffer_dirty(right);
2979 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
2983 BUG_ON(num_doubles != 0);
2991 push_for_double_split(trans, root, path, data_size);
2992 tried_avoid_double = 1;
2993 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2998 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
2999 struct btrfs_root *root,
3000 struct btrfs_path *path, int ins_len)
3002 struct btrfs_key key;
3003 struct extent_buffer *leaf;
3004 struct btrfs_file_extent_item *fi;
3009 leaf = path->nodes[0];
3010 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3012 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3013 key.type != BTRFS_EXTENT_CSUM_KEY);
3015 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3018 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3019 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3020 fi = btrfs_item_ptr(leaf, path->slots[0],
3021 struct btrfs_file_extent_item);
3022 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3024 btrfs_release_path(path);
3026 path->keep_locks = 1;
3027 path->search_for_split = 1;
3028 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3029 path->search_for_split = 0;
3034 leaf = path->nodes[0];
3035 /* if our item isn't there or got smaller, return now */
3036 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3039 /* the leaf has changed and now has room. return now */
3040 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3043 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3044 fi = btrfs_item_ptr(leaf, path->slots[0],
3045 struct btrfs_file_extent_item);
3046 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3050 btrfs_set_path_blocking(path);
3051 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3055 path->keep_locks = 0;
3056 btrfs_unlock_up_safe(path, 1);
3059 path->keep_locks = 0;
3063 static noinline int split_item(struct btrfs_trans_handle *trans,
3064 struct btrfs_root *root,
3065 struct btrfs_path *path,
3066 struct btrfs_key *new_key,
3067 unsigned long split_offset)
3069 struct extent_buffer *leaf;
3070 struct btrfs_item *item;
3071 struct btrfs_item *new_item;
3077 struct btrfs_disk_key disk_key;
3079 leaf = path->nodes[0];
3080 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3082 btrfs_set_path_blocking(path);
3084 item = btrfs_item_nr(leaf, path->slots[0]);
3085 orig_offset = btrfs_item_offset(leaf, item);
3086 item_size = btrfs_item_size(leaf, item);
3088 buf = kmalloc(item_size, GFP_NOFS);
3092 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3093 path->slots[0]), item_size);
3095 slot = path->slots[0] + 1;
3096 nritems = btrfs_header_nritems(leaf);
3097 if (slot != nritems) {
3098 /* shift the items */
3099 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3100 btrfs_item_nr_offset(slot),
3101 (nritems - slot) * sizeof(struct btrfs_item));
3104 btrfs_cpu_key_to_disk(&disk_key, new_key);
3105 btrfs_set_item_key(leaf, &disk_key, slot);
3107 new_item = btrfs_item_nr(leaf, slot);
3109 btrfs_set_item_offset(leaf, new_item, orig_offset);
3110 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3112 btrfs_set_item_offset(leaf, item,
3113 orig_offset + item_size - split_offset);
3114 btrfs_set_item_size(leaf, item, split_offset);
3116 btrfs_set_header_nritems(leaf, nritems + 1);
3118 /* write the data for the start of the original item */
3119 write_extent_buffer(leaf, buf,
3120 btrfs_item_ptr_offset(leaf, path->slots[0]),
3123 /* write the data for the new item */
3124 write_extent_buffer(leaf, buf + split_offset,
3125 btrfs_item_ptr_offset(leaf, slot),
3126 item_size - split_offset);
3127 btrfs_mark_buffer_dirty(leaf);
3129 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3135 * This function splits a single item into two items,
3136 * giving 'new_key' to the new item and splitting the
3137 * old one at split_offset (from the start of the item).
3139 * The path may be released by this operation. After
3140 * the split, the path is pointing to the old item. The
3141 * new item is going to be in the same node as the old one.
3143 * Note, the item being split must be small enough to live alone on
3144 * a tree block with room for one extra struct btrfs_item.
3146 * This allows us to split the item in place, keeping a lock on the
3147 * leaf the entire time.
3149 int btrfs_split_item(struct btrfs_trans_handle *trans,
3150 struct btrfs_root *root,
3151 struct btrfs_path *path,
3152 struct btrfs_key *new_key,
3153 unsigned long split_offset)
3156 ret = setup_leaf_for_split(trans, root, path,
3157 sizeof(struct btrfs_item));
3161 ret = split_item(trans, root, path, new_key, split_offset);
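/*
 * Editorial example (not part of the original file): a minimal sketch of
 * calling btrfs_split_item.  The split offset and the new key's offset
 * are made-up values; real callers derive them from the item being
 * split.  The path is assumed to already point at the item with
 * 'orig_key'.
 */
static int example_split_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct btrfs_key *orig_key)
{
	struct btrfs_key new_key;

	/* the new item receives the bytes from offset 64 to the end */
	new_key.objectid = orig_key->objectid;
	new_key.type = orig_key->type;
	new_key.offset = orig_key->offset + 64;

	return btrfs_split_item(trans, root, path, &new_key, 64);
}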
3166 * This function duplicates an item, giving 'new_key' to the new item.
3167 * It guarantees both items live in the same tree leaf and the new item
3168 * is contiguous with the original item.
3170 * This allows us to split a file extent in place, keeping a lock on the
3171 * leaf the entire time.
3173 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3174 struct btrfs_root *root,
3175 struct btrfs_path *path,
3176 struct btrfs_key *new_key)
3178 struct extent_buffer *leaf;
3182 leaf = path->nodes[0];
3183 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3184 ret = setup_leaf_for_split(trans, root, path,
3185 item_size + sizeof(struct btrfs_item));
3190 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3191 item_size, item_size +
3192 sizeof(struct btrfs_item), 1);
3195 leaf = path->nodes[0];
3196 memcpy_extent_buffer(leaf,
3197 btrfs_item_ptr_offset(leaf, path->slots[0]),
3198 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
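/*
 * Editorial example (not part of the original file): duplicating the item
 * the path points at, giving the copy a new key offset.  'new_offset' is
 * a placeholder; the file extent code would pass the split position.
 */
static int example_duplicate_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key, u64 new_offset)
{
	struct btrfs_key new_key = *key;

	new_key.offset = new_offset;
	return btrfs_duplicate_item(trans, root, path, &new_key);
}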
3204 * make the item pointed to by the path smaller. new_size indicates
3205 * how small to make it, and from_end tells us if we just chop bytes
3206 * off the end of the item or if we shift the item to chop bytes off the front.
3209 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3210 struct btrfs_root *root,
3211 struct btrfs_path *path,
3212 u32 new_size, int from_end)
3215 struct extent_buffer *leaf;
3216 struct btrfs_item *item;
3218 unsigned int data_end;
3219 unsigned int old_data_start;
3220 unsigned int old_size;
3221 unsigned int size_diff;
3224 leaf = path->nodes[0];
3225 slot = path->slots[0];
3227 old_size = btrfs_item_size_nr(leaf, slot);
3228 if (old_size == new_size)
3231 nritems = btrfs_header_nritems(leaf);
3232 data_end = leaf_data_end(root, leaf);
3234 old_data_start = btrfs_item_offset_nr(leaf, slot);
3236 size_diff = old_size - new_size;
3239 BUG_ON(slot >= nritems);
3242 * item0..itemN ... dataN.offset..dataN.size .. data0.size
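 * (editorial note, not in the original: item headers grow from the front
 *  of the leaf toward the end while item data grows from the end toward
 *  the front, with the free space in between:
 *
 *      [hdr0][hdr1]..[hdrN]  ...free space...  [dataN]..[data1][data0]
 *
 *  each header's offset field points into the data area, which is why
 *  moving data around always requires fixing the affected offsets.)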
3244 /* first correct the data pointers */
3245 for (i = slot; i < nritems; i++) {
3247 item = btrfs_item_nr(leaf, i);
3249 if (!leaf->map_token) {
3250 map_extent_buffer(leaf, (unsigned long)item,
3251 sizeof(struct btrfs_item),
3252 &leaf->map_token, &leaf->kaddr,
3253 &leaf->map_start, &leaf->map_len,
3257 ioff = btrfs_item_offset(leaf, item);
3258 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3261 if (leaf->map_token) {
3262 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3263 leaf->map_token = NULL;
3266 /* shift the data */
3268 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3269 data_end + size_diff, btrfs_leaf_data(leaf) +
3270 data_end, old_data_start + new_size - data_end);
3272 struct btrfs_disk_key disk_key;
3275 btrfs_item_key(leaf, &disk_key, slot);
3277 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3279 struct btrfs_file_extent_item *fi;
3281 fi = btrfs_item_ptr(leaf, slot,
3282 struct btrfs_file_extent_item);
3283 fi = (struct btrfs_file_extent_item *)(
3284 (unsigned long)fi - size_diff);
3286 if (btrfs_file_extent_type(leaf, fi) ==
3287 BTRFS_FILE_EXTENT_INLINE) {
3288 ptr = btrfs_item_ptr_offset(leaf, slot);
3289 memmove_extent_buffer(leaf, ptr,
3291 offsetof(struct btrfs_file_extent_item,
3296 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3297 data_end + size_diff, btrfs_leaf_data(leaf) +
3298 data_end, old_data_start - data_end);
3300 offset = btrfs_disk_key_offset(&disk_key);
3301 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3302 btrfs_set_item_key(leaf, &disk_key, slot);
3304 fixup_low_keys(trans, root, path, &disk_key, 1);
3307 item = btrfs_item_nr(leaf, slot);
3308 btrfs_set_item_size(leaf, item, new_size);
3309 btrfs_mark_buffer_dirty(leaf);
3311 if (btrfs_leaf_free_space(root, leaf) < 0) {
3312 btrfs_print_leaf(root, leaf);
3319 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
3321 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3322 struct btrfs_root *root, struct btrfs_path *path,
3326 struct extent_buffer *leaf;
3327 struct btrfs_item *item;
3329 unsigned int data_end;
3330 unsigned int old_data;
3331 unsigned int old_size;
3334 leaf = path->nodes[0];
3336 nritems = btrfs_header_nritems(leaf);
3337 data_end = leaf_data_end(root, leaf);
3339 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3340 btrfs_print_leaf(root, leaf);
3343 slot = path->slots[0];
3344 old_data = btrfs_item_end_nr(leaf, slot);
3347 if (slot >= nritems) {
3348 btrfs_print_leaf(root, leaf);
3349 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3355 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3357 /* first correct the data pointers */
3358 for (i = slot; i < nritems; i++) {
3360 item = btrfs_item_nr(leaf, i);
3362 if (!leaf->map_token) {
3363 map_extent_buffer(leaf, (unsigned long)item,
3364 sizeof(struct btrfs_item),
3365 &leaf->map_token, &leaf->kaddr,
3366 &leaf->map_start, &leaf->map_len,
3369 ioff = btrfs_item_offset(leaf, item);
3370 btrfs_set_item_offset(leaf, item, ioff - data_size);
3373 if (leaf->map_token) {
3374 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3375 leaf->map_token = NULL;
3378 /* shift the data */
3379 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3380 data_end - data_size, btrfs_leaf_data(leaf) +
3381 data_end, old_data - data_end);
3383 data_end = old_data;
3384 old_size = btrfs_item_size_nr(leaf, slot);
3385 item = btrfs_item_nr(leaf, slot);
3386 btrfs_set_item_size(leaf, item, old_size + data_size);
3387 btrfs_mark_buffer_dirty(leaf);
3389 if (btrfs_leaf_free_space(root, leaf) < 0) {
3390 btrfs_print_leaf(root, leaf);
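/*
 * Editorial example (not part of the original file): resizing an item in
 * place.  The 16-byte delta is arbitrary; note that btrfs_extend_item
 * only makes room, it does not initialize the new bytes, so the caller
 * must fill them (e.g. with write_extent_buffer) afterwards.
 */
static void example_resize_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* grow the item by 16 bytes at the end */
	btrfs_extend_item(trans, root, path, 16);

	/* ... write the new 16 bytes here ... */

	/* shrink it back to its old size, chopping from the end */
	btrfs_truncate_item(trans, root, path, old_size, 1);
}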
3397 * Given a key and some data, insert items into the tree.
3398 * This does all the path init required, making room in the tree if needed.
3399 * Returns the number of keys that were inserted.
3401 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3402 struct btrfs_root *root,
3403 struct btrfs_path *path,
3404 struct btrfs_key *cpu_key, u32 *data_size,
3407 struct extent_buffer *leaf;
3408 struct btrfs_item *item;
3415 unsigned int data_end;
3416 struct btrfs_disk_key disk_key;
3417 struct btrfs_key found_key;
3419 for (i = 0; i < nr; i++) {
3420 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3421 BTRFS_LEAF_DATA_SIZE(root)) {
3425 total_data += data_size[i];
3426 total_size += data_size[i] + sizeof(struct btrfs_item);
3430 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3436 leaf = path->nodes[0];
3438 nritems = btrfs_header_nritems(leaf);
3439 data_end = leaf_data_end(root, leaf);
3441 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3442 for (i = nr - 1; i >= 0; i--) {
3443 total_data -= data_size[i];
3444 total_size -= data_size[i] + sizeof(struct btrfs_item);
3445 if (total_size < btrfs_leaf_free_space(root, leaf))
3451 slot = path->slots[0];
3454 if (slot != nritems) {
3455 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3457 item = btrfs_item_nr(leaf, slot);
3458 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3460 /* figure out how many keys we can insert in here */
3461 total_data = data_size[0];
3462 for (i = 1; i < nr; i++) {
3463 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3465 total_data += data_size[i];
3469 if (old_data < data_end) {
3470 btrfs_print_leaf(root, leaf);
3471 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3472 slot, old_data, data_end);
3476 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3478 /* first correct the data pointers */
3479 WARN_ON(leaf->map_token);
3480 for (i = slot; i < nritems; i++) {
3483 item = btrfs_item_nr(leaf, i);
3484 if (!leaf->map_token) {
3485 map_extent_buffer(leaf, (unsigned long)item,
3486 sizeof(struct btrfs_item),
3487 &leaf->map_token, &leaf->kaddr,
3488 &leaf->map_start, &leaf->map_len,
3492 ioff = btrfs_item_offset(leaf, item);
3493 btrfs_set_item_offset(leaf, item, ioff - total_data);
3495 if (leaf->map_token) {
3496 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3497 leaf->map_token = NULL;
3500 /* shift the items */
3501 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3502 btrfs_item_nr_offset(slot),
3503 (nritems - slot) * sizeof(struct btrfs_item));
3505 /* shift the data */
3506 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3507 data_end - total_data, btrfs_leaf_data(leaf) +
3508 data_end, old_data - data_end);
3509 data_end = old_data;
3512 * this sucks but it has to be done: if we are inserting at
3513 * the end of the leaf, only insert 1 of the items, since we
3514 * have no way of knowing what's on the next leaf and we'd have
3515 * to drop our current locks to figure it out
3520 /* setup the item for the new data */
3521 for (i = 0; i < nr; i++) {
3522 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3523 btrfs_set_item_key(leaf, &disk_key, slot + i);
3524 item = btrfs_item_nr(leaf, slot + i);
3525 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3526 data_end -= data_size[i];
3527 btrfs_set_item_size(leaf, item, data_size[i]);
3529 btrfs_set_header_nritems(leaf, nritems + nr);
3530 btrfs_mark_buffer_dirty(leaf);
3534 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3535 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3538 if (btrfs_leaf_free_space(root, leaf) < 0) {
3539 btrfs_print_leaf(root, leaf);
3549 * this is a helper for btrfs_insert_empty_items, the main goal here is
3550 * to save stack depth by doing the bulk of the work in a function
3551 * that doesn't call btrfs_search_slot
3553 int setup_items_for_insert(struct btrfs_trans_handle *trans,
3554 struct btrfs_root *root, struct btrfs_path *path,
3555 struct btrfs_key *cpu_key, u32 *data_size,
3556 u32 total_data, u32 total_size, int nr)
3558 struct btrfs_item *item;
3561 unsigned int data_end;
3562 struct btrfs_disk_key disk_key;
3564 struct extent_buffer *leaf;
3567 leaf = path->nodes[0];
3568 slot = path->slots[0];
3570 nritems = btrfs_header_nritems(leaf);
3571 data_end = leaf_data_end(root, leaf);
3573 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3574 btrfs_print_leaf(root, leaf);
3575 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3576 total_size, btrfs_leaf_free_space(root, leaf));
3580 if (slot != nritems) {
3581 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3583 if (old_data < data_end) {
3584 btrfs_print_leaf(root, leaf);
3585 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3586 slot, old_data, data_end);
3590 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3592 /* first correct the data pointers */
3593 WARN_ON(leaf->map_token);
3594 for (i = slot; i < nritems; i++) {
3597 item = btrfs_item_nr(leaf, i);
3598 if (!leaf->map_token) {
3599 map_extent_buffer(leaf, (unsigned long)item,
3600 sizeof(struct btrfs_item),
3601 &leaf->map_token, &leaf->kaddr,
3602 &leaf->map_start, &leaf->map_len,
3606 ioff = btrfs_item_offset(leaf, item);
3607 btrfs_set_item_offset(leaf, item, ioff - total_data);
3609 if (leaf->map_token) {
3610 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3611 leaf->map_token = NULL;
3614 /* shift the items */
3615 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3616 btrfs_item_nr_offset(slot),
3617 (nritems - slot) * sizeof(struct btrfs_item));
3619 /* shift the data */
3620 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3621 data_end - total_data, btrfs_leaf_data(leaf) +
3622 data_end, old_data - data_end);
3623 data_end = old_data;
3626 /* setup the item for the new data */
3627 for (i = 0; i < nr; i++) {
3628 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3629 btrfs_set_item_key(leaf, &disk_key, slot + i);
3630 item = btrfs_item_nr(leaf, slot + i);
3631 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3632 data_end -= data_size[i];
3633 btrfs_set_item_size(leaf, item, data_size[i]);
3636 btrfs_set_header_nritems(leaf, nritems + nr);
3640 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3641 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3643 btrfs_unlock_up_safe(path, 1);
3644 btrfs_mark_buffer_dirty(leaf);
3646 if (btrfs_leaf_free_space(root, leaf) < 0) {
3647 btrfs_print_leaf(root, leaf);
3654 * Given a key and some data, insert items into the tree.
3655 * This does all the path init required, making room in the tree if needed.
3657 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3658 struct btrfs_root *root,
3659 struct btrfs_path *path,
3660 struct btrfs_key *cpu_key, u32 *data_size,
3669 for (i = 0; i < nr; i++)
3670 total_data += data_size[i];
3672 total_size = total_data + (nr * sizeof(struct btrfs_item));
3673 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3679 slot = path->slots[0];
3682 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3683 total_data, total_size, nr);
3690 * Given a key and some data, insert an item into the tree.
3691 * This does all the path init required, making room in the tree if needed.
3693 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3694 *root, struct btrfs_key *cpu_key, void *data, u32
3698 struct btrfs_path *path;
3699 struct extent_buffer *leaf;
3702 path = btrfs_alloc_path();
3705 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3707 leaf = path->nodes[0];
3708 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3709 write_extent_buffer(leaf, data, ptr, data_size);
3710 btrfs_mark_buffer_dirty(leaf);
3712 btrfs_free_path(path);
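/*
 * Editorial example (not part of the original file): inserting one small
 * item with btrfs_insert_item.  The key fields and payload are
 * placeholders; a real caller uses a key type that matches the payload
 * it writes.
 */
static int example_insert_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_key key;
	u64 payload = 0;

	key.objectid = 256;	/* placeholder objectid */
	key.type = 0;		/* placeholder key type */
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, &payload,
				 sizeof(payload));
}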
3717 * delete the pointer from a given node.
3719 * the tree should have been previously balanced so the deletion does not empty a node.
3722 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3723 struct btrfs_path *path, int level, int slot)
3725 struct extent_buffer *parent = path->nodes[level];
3730 nritems = btrfs_header_nritems(parent);
3731 if (slot != nritems - 1) {
3732 memmove_extent_buffer(parent,
3733 btrfs_node_key_ptr_offset(slot),
3734 btrfs_node_key_ptr_offset(slot + 1),
3735 sizeof(struct btrfs_key_ptr) *
3736 (nritems - slot - 1));
3739 btrfs_set_header_nritems(parent, nritems);
3740 if (nritems == 0 && parent == root->node) {
3741 BUG_ON(btrfs_header_level(root->node) != 1);
3742 /* just turn the root into a leaf and break */
3743 btrfs_set_header_level(root->node, 0);
3744 } else if (slot == 0) {
3745 struct btrfs_disk_key disk_key;
3747 btrfs_node_key(parent, &disk_key, 0);
3748 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3752 btrfs_mark_buffer_dirty(parent);
3757 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3760 * This deletes the pointer in path->nodes[1] and frees the leaf
3761 * block extent. zero is returned if it all worked out, < 0 otherwise.
3763 * The path must have already been setup for deleting the leaf, including
3764 * all the proper balancing. path->nodes[1] must be locked.
3766 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3767 struct btrfs_root *root,
3768 struct btrfs_path *path,
3769 struct extent_buffer *leaf)
3773 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3774 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3779 * btrfs_free_extent is expensive, we want to make sure we
3780 * aren't holding any locks when we call it
3782 btrfs_unlock_up_safe(path, 0);
3784 root_sub_used(root, leaf->len);
3786 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3790 * delete the item at the leaf level in path. If that empties
3791 * the leaf, remove it from the tree
3793 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3794 struct btrfs_path *path, int slot, int nr)
3796 struct extent_buffer *leaf;
3797 struct btrfs_item *item;
3805 leaf = path->nodes[0];
3806 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3808 for (i = 0; i < nr; i++)
3809 dsize += btrfs_item_size_nr(leaf, slot + i);
3811 nritems = btrfs_header_nritems(leaf);
3813 if (slot + nr != nritems) {
3814 int data_end = leaf_data_end(root, leaf);
3816 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3818 btrfs_leaf_data(leaf) + data_end,
3819 last_off - data_end);
3821 for (i = slot + nr; i < nritems; i++) {
3824 item = btrfs_item_nr(leaf, i);
3825 if (!leaf->map_token) {
3826 map_extent_buffer(leaf, (unsigned long)item,
3827 sizeof(struct btrfs_item),
3828 &leaf->map_token, &leaf->kaddr,
3829 &leaf->map_start, &leaf->map_len,
3832 ioff = btrfs_item_offset(leaf, item);
3833 btrfs_set_item_offset(leaf, item, ioff + dsize);
3836 if (leaf->map_token) {
3837 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3838 leaf->map_token = NULL;
3841 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3842 btrfs_item_nr_offset(slot + nr),
3843 sizeof(struct btrfs_item) *
3844 (nritems - slot - nr));
3846 btrfs_set_header_nritems(leaf, nritems - nr);
3849 /* delete the leaf if we've emptied it */
3851 if (leaf == root->node) {
3852 btrfs_set_header_level(leaf, 0);
3854 btrfs_set_path_blocking(path);
3855 clean_tree_block(trans, root, leaf);
3856 ret = btrfs_del_leaf(trans, root, path, leaf);
3860 int used = leaf_space_used(leaf, 0, nritems);
3862 struct btrfs_disk_key disk_key;
3864 btrfs_item_key(leaf, &disk_key, 0);
3865 wret = fixup_low_keys(trans, root, path,
3871 /* delete the leaf if it is mostly empty */
3872 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3873 /* push_leaf_left fixes the path.
3874 * make sure the path still points to our leaf
3875 * for a possible call to del_ptr below
3877 slot = path->slots[1];
3878 extent_buffer_get(leaf);
3880 btrfs_set_path_blocking(path);
3881 wret = push_leaf_left(trans, root, path, 1, 1,
3883 if (wret < 0 && wret != -ENOSPC)
3886 if (path->nodes[0] == leaf &&
3887 btrfs_header_nritems(leaf)) {
3888 wret = push_leaf_right(trans, root, path, 1,
3890 if (wret < 0 && wret != -ENOSPC)
3894 if (btrfs_header_nritems(leaf) == 0) {
3895 path->slots[1] = slot;
3896 ret = btrfs_del_leaf(trans, root, path, leaf);
3898 free_extent_buffer(leaf);
3900 /* if we're still in the path, make sure
3901 * we're dirty. Otherwise, one of the
3902 * push_leaf functions must have already
3903 * dirtied this buffer
3905 if (path->nodes[0] == leaf)
3906 btrfs_mark_buffer_dirty(leaf);
3907 free_extent_buffer(leaf);
3910 btrfs_mark_buffer_dirty(leaf);
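/*
 * Editorial example (not part of the original file): looking an item up
 * by key and deleting it.  Passing ins_len == -1 and cow == 1 to
 * btrfs_search_slot signals that we intend to delete, so the tree is
 * balanced on the way down.
 */
static int example_delete_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	btrfs_free_path(path);
	return ret; /* 1 if the key was not found */
}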
3917 * search the tree again to find a leaf with lesser keys
3918 * returns 0 if it found something or 1 if there are no lesser leaves.
3919 * returns < 0 on io errors.
3921 * This may release the path, and so you may lose any locks held at the time you call it.
3924 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3926 struct btrfs_key key;
3927 struct btrfs_disk_key found_key;
3930 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3934 else if (key.type > 0)
3936 else if (key.objectid > 0)
3941 btrfs_release_path(path);
3942 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3945 btrfs_item_key(path->nodes[0], &found_key, 0);
3946 ret = comp_keys(&found_key, &key);
3953 * A helper function to walk down the tree starting at min_key, and looking
3954 * for nodes or leaves that are either in cache or have a minimum
3955 * transaction id. This is used by the btree defrag code, and tree logging
3957 * This does not cow, but it does stuff the starting key it finds back
3958 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3959 * key and get a writable path.
3961 * This does lock as it descends, and path->keep_locks should be set
3962 * to 1 by the caller.
3964 * This honors path->lowest_level to prevent descent past a given level of the tree.
3967 * min_trans indicates the oldest transaction that you are interested
3968 * in walking through. Any nodes or leaves older than min_trans are
3969 * skipped over (without reading them).
3971 * returns zero if something useful was found, < 0 on error and 1 if there
3972 * was nothing in the tree that matched the search criteria.
3974 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3975 struct btrfs_key *max_key,
3976 struct btrfs_path *path, int cache_only,
3979 struct extent_buffer *cur;
3980 struct btrfs_key found_key;
3987 WARN_ON(!path->keep_locks);
3989 cur = btrfs_lock_root_node(root);
3990 level = btrfs_header_level(cur);
3991 WARN_ON(path->nodes[level]);
3992 path->nodes[level] = cur;
3993 path->locks[level] = 1;
3995 if (btrfs_header_generation(cur) < min_trans) {
4000 nritems = btrfs_header_nritems(cur);
4001 level = btrfs_header_level(cur);
4002 sret = bin_search(cur, min_key, level, &slot);
4004 /* at the lowest level, we're done; setup the path and exit */
4005 if (level == path->lowest_level) {
4006 if (slot >= nritems)
4009 path->slots[level] = slot;
4010 btrfs_item_key_to_cpu(cur, &found_key, slot);
4013 if (sret && slot > 0)
4016 * check this node pointer against the cache_only and
4017 * min_trans parameters. If it isn't in cache or is too
4018 * old, skip to the next one.
4020 while (slot < nritems) {
4023 struct extent_buffer *tmp;
4024 struct btrfs_disk_key disk_key;
4026 blockptr = btrfs_node_blockptr(cur, slot);
4027 gen = btrfs_node_ptr_generation(cur, slot);
4028 if (gen < min_trans) {
4036 btrfs_node_key(cur, &disk_key, slot);
4037 if (comp_keys(&disk_key, max_key) >= 0) {
4043 tmp = btrfs_find_tree_block(root, blockptr,
4044 btrfs_level_size(root, level - 1));
4046 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4047 free_extent_buffer(tmp);
4051 free_extent_buffer(tmp);
4056 * we didn't find a candidate key in this node, walk forward
4057 * and find another one
4059 if (slot >= nritems) {
4060 path->slots[level] = slot;
4061 btrfs_set_path_blocking(path);
4062 sret = btrfs_find_next_key(root, path, min_key, level,
4063 cache_only, min_trans);
4065 btrfs_release_path(path);
4071 /* save our key for returning back */
4072 btrfs_node_key_to_cpu(cur, &found_key, slot);
4073 path->slots[level] = slot;
4074 if (level == path->lowest_level) {
4076 unlock_up(path, level, 1);
4079 btrfs_set_path_blocking(path);
4080 cur = read_node_slot(root, cur, slot);
4083 btrfs_tree_lock(cur);
4085 path->locks[level - 1] = 1;
4086 path->nodes[level - 1] = cur;
4087 unlock_up(path, level, 1);
4088 btrfs_clear_path_blocking(path, NULL);
4092 memcpy(min_key, &found_key, sizeof(found_key));
4093 btrfs_set_path_blocking(path);
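/*
 * Editorial example (not part of the original file): a minimal sketch of
 * scanning a tree with btrfs_search_forward for items in blocks newer
 * than 'min_trans', roughly the way defrag and tree logging use it.  The
 * key advance at the bottom is simplified.
 */
static int example_scan_newer(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
	struct btrfs_key max_key = { .objectid = (u64)-1, .type = (u8)-1,
				     .offset = (u64)-1 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, min_trans);
		if (ret)
			break;
		/* process path->nodes[0] / path->slots[0] here */
		btrfs_release_path(path);
		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}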
4098 * this is similar to btrfs_next_leaf, but does not try to preserve
4099 * and fixup the path. It looks for and returns the next key in the
4100 * tree based on the current path and the cache_only and min_trans parameters.
4103 * 0 is returned if another key is found, < 0 if there are any errors
4104 * and 1 is returned if there are no higher keys in the tree
4106 * path->keep_locks should be set to 1 on the search made before
4107 * calling this function.
4109 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4110 struct btrfs_key *key, int level,
4111 int cache_only, u64 min_trans)
4114 struct extent_buffer *c;
4116 WARN_ON(!path->keep_locks);
4117 while (level < BTRFS_MAX_LEVEL) {
4118 if (!path->nodes[level])
4121 slot = path->slots[level] + 1;
4122 c = path->nodes[level];
4124 if (slot >= btrfs_header_nritems(c)) {
4127 struct btrfs_key cur_key;
4128 if (level + 1 >= BTRFS_MAX_LEVEL ||
4129 !path->nodes[level + 1])
4132 if (path->locks[level + 1]) {
4137 slot = btrfs_header_nritems(c) - 1;
4139 btrfs_item_key_to_cpu(c, &cur_key, slot);
4141 btrfs_node_key_to_cpu(c, &cur_key, slot);
4143 orig_lowest = path->lowest_level;
4144 btrfs_release_path(path);
4145 path->lowest_level = level;
4146 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4148 path->lowest_level = orig_lowest;
4152 c = path->nodes[level];
4153 slot = path->slots[level];
4160 btrfs_item_key_to_cpu(c, key, slot);
4162 u64 blockptr = btrfs_node_blockptr(c, slot);
4163 u64 gen = btrfs_node_ptr_generation(c, slot);
4166 struct extent_buffer *cur;
4167 cur = btrfs_find_tree_block(root, blockptr,
4168 btrfs_level_size(root, level - 1));
4169 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4172 free_extent_buffer(cur);
4175 free_extent_buffer(cur);
4177 if (gen < min_trans) {
4181 btrfs_node_key_to_cpu(c, key, slot);
4189 * search the tree again to find a leaf with greater keys
4190 * returns 0 if it found something or 1 if there are no greater leaves.
4191 * returns < 0 on io errors.
4193 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4197 struct extent_buffer *c;
4198 struct extent_buffer *next;
4199 struct btrfs_key key;
4202 int old_spinning = path->leave_spinning;
4203 int force_blocking = 0;
4205 nritems = btrfs_header_nritems(path->nodes[0]);
4210 * we take the blocks in an order that upsets lockdep. Using
4211 * blocking mode is the only way around it.
4213 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4217 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4221 btrfs_release_path(path);
4223 path->keep_locks = 1;
4225 if (!force_blocking)
4226 path->leave_spinning = 1;
4228 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4229 path->keep_locks = 0;
4234 nritems = btrfs_header_nritems(path->nodes[0]);
4236 * by releasing the path above we dropped all our locks. A balance
4237 * could have added more items next to the key that used to be
4238 * at the very end of the block. So, check again here and
4239 * advance the path if there are now more items available.
4241 if (nritems > 0 && path->slots[0] < nritems - 1) {
4248 while (level < BTRFS_MAX_LEVEL) {
4249 if (!path->nodes[level]) {
4254 slot = path->slots[level] + 1;
4255 c = path->nodes[level];
4256 if (slot >= btrfs_header_nritems(c)) {
4258 if (level == BTRFS_MAX_LEVEL) {
4266 btrfs_tree_unlock(next);
4267 free_extent_buffer(next);
4271 ret = read_block_for_search(NULL, root, path, &next, level,
4277 btrfs_release_path(path);
4281 if (!path->skip_locking) {
4282 ret = btrfs_try_spin_lock(next);
4284 btrfs_set_path_blocking(path);
4285 btrfs_tree_lock(next);
4286 if (!force_blocking)
4287 btrfs_clear_path_blocking(path, next);
4290 btrfs_set_lock_blocking(next);
4294 path->slots[level] = slot;
4297 c = path->nodes[level];
4298 if (path->locks[level])
4299 btrfs_tree_unlock(c);
4301 free_extent_buffer(c);
4302 path->nodes[level] = next;
4303 path->slots[level] = 0;
4304 if (!path->skip_locking)
4305 path->locks[level] = 1;
4310 ret = read_block_for_search(NULL, root, path, &next, level,
4316 btrfs_release_path(path);
4320 if (!path->skip_locking) {
4321 btrfs_assert_tree_locked(path->nodes[level]);
4322 ret = btrfs_try_spin_lock(next);
4324 btrfs_set_path_blocking(path);
4325 btrfs_tree_lock(next);
4326 if (!force_blocking)
4327 btrfs_clear_path_blocking(path, next);
4330 btrfs_set_lock_blocking(next);
4335 unlock_up(path, 0, 1);
4336 path->leave_spinning = old_spinning;
4338 btrfs_set_path_blocking(path);
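/*
 * Editorial example (not part of the original file): the standard pattern
 * for walking every item in a tree, calling btrfs_next_leaf whenever the
 * slot runs off the end of the current leaf.
 */
static int example_walk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { .objectid = 0, .type = 0, .offset = 0 };
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* process the item at (leaf, path->slots[0]) here */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}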
4344 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4345 * searching until it gets past min_objectid or finds an item of 'type'
4347 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4349 int btrfs_previous_item(struct btrfs_root *root,
4350 struct btrfs_path *path, u64 min_objectid,
4353 struct btrfs_key found_key;
4354 struct extent_buffer *leaf;
4359 if (path->slots[0] == 0) {
4360 btrfs_set_path_blocking(path);
4361 ret = btrfs_prev_leaf(root, path);
4367 leaf = path->nodes[0];
4368 nritems = btrfs_header_nritems(leaf);
4371 if (path->slots[0] == nritems)
4374 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4375 if (found_key.objectid < min_objectid)
4377 if (found_key.type == type)
4379 if (found_key.objectid == min_objectid &&
4380 found_key.type < type)