2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
23 #include "transaction.h"
24 #include "print-tree.h"
27 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
28 *root, struct btrfs_path *path, int level);
29 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_key *ins_key,
31 struct btrfs_path *path, int data_size, int extend);
32 static int push_node_left(struct btrfs_trans_handle *trans,
33 struct btrfs_root *root, struct extent_buffer *dst,
34 struct extent_buffer *src, int empty);
35 static int balance_node_right(struct btrfs_trans_handle *trans,
36 struct btrfs_root *root,
37 struct extent_buffer *dst_buf,
38 struct extent_buffer *src_buf);
39 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot);
41 static int setup_items_for_insert(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root, struct btrfs_path *path,
43 struct btrfs_key *cpu_key, u32 *data_size,
44 u32 total_data, u32 total_size, int nr);
47 struct btrfs_path *btrfs_alloc_path(void)
49 struct btrfs_path *path;
50 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
57 * set all locked nodes in the path to blocking locks. This should
58 * be done before scheduling
60 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
63 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
64 if (p->nodes[i] && p->locks[i])
65 btrfs_set_lock_blocking(p->nodes[i]);
70 * reset all the locked nodes in the path to spinning locks.
72 * held is used to keep lockdep happy: when lockdep is enabled
73 * we set held to a blocking lock before we go around and
74 * retake all the spinlocks in the path. You can safely use NULL
77 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
78 struct extent_buffer *held)
82 #ifdef CONFIG_DEBUG_LOCK_ALLOC
83 /* lockdep really cares that we take all of these spinlocks
84 * in the right order. If any of the locks in the path are not
85 * currently blocking, it is going to complain. So, make really
86 * really sure by forcing the path to blocking before we clear
90 btrfs_set_lock_blocking(held);
91 btrfs_set_path_blocking(p);
94 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
95 if (p->nodes[i] && p->locks[i])
96 btrfs_clear_lock_blocking(p->nodes[i]);
99 #ifdef CONFIG_DEBUG_LOCK_ALLOC
101 btrfs_clear_lock_blocking(held);
105 /* this also releases the path */
106 void btrfs_free_path(struct btrfs_path *p)
108 btrfs_release_path(NULL, p);
109 kmem_cache_free(btrfs_path_cachep, p);
113 * path release drops references on the extent buffers in the path
114 * and it drops any locks held by this path
116 * It is safe to call this on paths that have no locks or extent buffers held.
118 noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
122 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
127 btrfs_tree_unlock(p->nodes[i]);
130 free_extent_buffer(p->nodes[i]);
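/*
 * Editor's sketch (not part of the original file): the typical lifecycle
 * of a btrfs_path, as the helpers above are meant to be used.  The root,
 * ino and key_type values are hypothetical and error handling is
 * abbreviated.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key = { .objectid = ino, .type = key_type, .offset = 0 };
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	... read the item through path->nodes[0] / path->slots[0] ...
 *
 *	btrfs_release_path(root, path);	  drops locks and refs, path reusable
 *	... search again with the same path if needed ...
 * out:
 *	btrfs_free_path(path);		  also releases the path
 */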
136 * safely gets a reference on the root node of a tree. A lock
137 * is not taken, so a concurrent writer may put a different node
138 * at the root of the tree. See btrfs_lock_root_node for the
141 * The extent buffer returned by this has a reference taken, so
142 * it won't disappear. It may stop being the root of the tree
143 * at any time because there are no locks held.
145 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
147 struct extent_buffer *eb;
148 spin_lock(&root->node_lock);
150 extent_buffer_get(eb);
151 spin_unlock(&root->node_lock);
155 /* loop around taking references on and locking the root node of the
156 * tree until you end up with a lock on the root. A locked buffer
157 * is returned, with a reference held.
159 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
161 struct extent_buffer *eb;
164 eb = btrfs_root_node(root);
167 spin_lock(&root->node_lock);
168 if (eb == root->node) {
169 spin_unlock(&root->node_lock);
172 spin_unlock(&root->node_lock);
174 btrfs_tree_unlock(eb);
175 free_extent_buffer(eb);
180 /* cowonly roots (everything not a reference counted cow subvolume) just get
181 * put onto a simple dirty list. transaction.c walks this to make sure they
182 * get properly updated on disk.
184 static void add_root_to_dirty_list(struct btrfs_root *root)
186 if (root->track_dirty && list_empty(&root->dirty_list)) {
187 list_add(&root->dirty_list,
188 &root->fs_info->dirty_cowonly_roots);
193 * used by snapshot creation to make a copy of a root for a tree with
194 * a given objectid. The buffer with the new root node is returned in
195 * cow_ret, and this func returns zero on success or a negative error code.
197 int btrfs_copy_root(struct btrfs_trans_handle *trans,
198 struct btrfs_root *root,
199 struct extent_buffer *buf,
200 struct extent_buffer **cow_ret, u64 new_root_objectid)
202 struct extent_buffer *cow;
206 struct btrfs_disk_key disk_key;
208 WARN_ON(root->ref_cows && trans->transid !=
209 root->fs_info->running_transaction->transid);
210 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
212 level = btrfs_header_level(buf);
213 nritems = btrfs_header_nritems(buf);
215 btrfs_item_key(buf, &disk_key, 0);
217 btrfs_node_key(buf, &disk_key, 0);
219 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
220 new_root_objectid, &disk_key, level,
225 copy_extent_buffer(cow, buf, 0, 0, cow->len);
226 btrfs_set_header_bytenr(cow, cow->start);
227 btrfs_set_header_generation(cow, trans->transid);
228 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
229 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
230 BTRFS_HEADER_FLAG_RELOC);
231 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
232 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
234 btrfs_set_header_owner(cow, new_root_objectid);
236 write_extent_buffer(cow, root->fs_info->fsid,
237 (unsigned long)btrfs_header_fsid(cow),
240 WARN_ON(btrfs_header_generation(buf) > trans->transid);
241 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
242 ret = btrfs_inc_ref(trans, root, cow, 1);
244 ret = btrfs_inc_ref(trans, root, cow, 0);
249 btrfs_mark_buffer_dirty(cow);
255 * check if the tree block can be shared by multiple trees
257 int btrfs_block_can_be_shared(struct btrfs_root *root,
258 struct extent_buffer *buf)
261 * Tree blocks not in reference counted trees and tree roots
262 * are never shared. If a block was allocated after the last
263 * snapshot and the block was not allocated by tree relocation,
264 * we know the block is not shared.
266 if (root->ref_cows &&
267 buf != root->node && buf != root->commit_root &&
268 (btrfs_header_generation(buf) <=
269 btrfs_root_last_snapshot(&root->root_item) ||
270 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
272 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
273 if (root->ref_cows &&
274 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
280 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
281 struct btrfs_root *root,
282 struct extent_buffer *buf,
283 struct extent_buffer *cow,
293 * Backrefs update rules:
295 * Always use full backrefs for extent pointers in tree blocks
296 * allocated by tree relocation.
298 * If a shared tree block is no longer referenced by its owner
299 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
300 * use full backrefs for extent pointers in tree block.
302 * If a tree block is being relocated
303 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
304 * use full backrefs for extent pointers in tree block.
305 * The reason for this is some operations (such as drop tree)
306 * are only allowed for blocks that use full backrefs.
309 if (btrfs_block_can_be_shared(root, buf)) {
310 ret = btrfs_lookup_extent_info(trans, root, buf->start,
311 buf->len, &refs, &flags);
316 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
317 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
318 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
323 owner = btrfs_header_owner(buf);
324 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
325 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
328 if ((owner == root->root_key.objectid ||
329 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
330 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
331 ret = btrfs_inc_ref(trans, root, buf, 1);
334 if (root->root_key.objectid ==
335 BTRFS_TREE_RELOC_OBJECTID) {
336 ret = btrfs_dec_ref(trans, root, buf, 0);
338 ret = btrfs_inc_ref(trans, root, cow, 1);
341 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
344 if (root->root_key.objectid ==
345 BTRFS_TREE_RELOC_OBJECTID)
346 ret = btrfs_inc_ref(trans, root, cow, 1);
348 ret = btrfs_inc_ref(trans, root, cow, 0);
351 if (new_flags != 0) {
352 ret = btrfs_set_disk_extent_flags(trans, root,
359 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
360 if (root->root_key.objectid ==
361 BTRFS_TREE_RELOC_OBJECTID)
362 ret = btrfs_inc_ref(trans, root, cow, 1);
364 ret = btrfs_inc_ref(trans, root, cow, 0);
366 ret = btrfs_dec_ref(trans, root, buf, 1);
369 clean_tree_block(trans, root, buf);
376 * does the dirty work in cow of a single block. The parent block (if
377 * supplied) is updated to point to the new cow copy. The new buffer is marked
378 * dirty and returned locked. If you modify the block it needs to be marked
381 * search_start -- an allocation hint for the new block
383 * empty_size -- a hint that you plan on doing more cow. This is the size in
384 * bytes the allocator should try to find free next to the block it returns.
385 * This is just a hint and may be ignored by the allocator.
387 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
388 struct btrfs_root *root,
389 struct extent_buffer *buf,
390 struct extent_buffer *parent, int parent_slot,
391 struct extent_buffer **cow_ret,
392 u64 search_start, u64 empty_size)
394 struct btrfs_disk_key disk_key;
395 struct extent_buffer *cow;
404 btrfs_assert_tree_locked(buf);
406 WARN_ON(root->ref_cows && trans->transid !=
407 root->fs_info->running_transaction->transid);
408 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
410 level = btrfs_header_level(buf);
413 btrfs_item_key(buf, &disk_key, 0);
415 btrfs_node_key(buf, &disk_key, 0);
417 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
419 parent_start = parent->start;
425 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
426 root->root_key.objectid, &disk_key,
427 level, search_start, empty_size);
431 /* cow is set to blocking by btrfs_init_new_buffer */
433 copy_extent_buffer(cow, buf, 0, 0, cow->len);
434 btrfs_set_header_bytenr(cow, cow->start);
435 btrfs_set_header_generation(cow, trans->transid);
436 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
437 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
438 BTRFS_HEADER_FLAG_RELOC);
439 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
440 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
442 btrfs_set_header_owner(cow, root->root_key.objectid);
444 write_extent_buffer(cow, root->fs_info->fsid,
445 (unsigned long)btrfs_header_fsid(cow),
448 update_ref_for_cow(trans, root, buf, cow, &last_ref);
450 if (buf == root->node) {
451 WARN_ON(parent && parent != buf);
452 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
453 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
454 parent_start = buf->start;
458 spin_lock(&root->node_lock);
460 extent_buffer_get(cow);
461 spin_unlock(&root->node_lock);
463 btrfs_free_tree_block(trans, root, buf, parent_start,
465 free_extent_buffer(buf);
466 add_root_to_dirty_list(root);
468 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
469 parent_start = parent->start;
473 WARN_ON(trans->transid != btrfs_header_generation(parent));
474 btrfs_set_node_blockptr(parent, parent_slot,
476 btrfs_set_node_ptr_generation(parent, parent_slot,
478 btrfs_mark_buffer_dirty(parent);
479 btrfs_free_tree_block(trans, root, buf, parent_start,
483 btrfs_tree_unlock(buf);
484 free_extent_buffer(buf);
485 btrfs_mark_buffer_dirty(cow);
490 static inline int should_cow_block(struct btrfs_trans_handle *trans,
491 struct btrfs_root *root,
492 struct extent_buffer *buf)
494 if (btrfs_header_generation(buf) == trans->transid &&
495 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
496 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
497 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
503 * cows a single block, see __btrfs_cow_block for the real work.
504 * This version of it has extra checks so that a block isn't cow'd more than
505 * once per transaction, as long as it hasn't been written yet
507 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
508 struct btrfs_root *root, struct extent_buffer *buf,
509 struct extent_buffer *parent, int parent_slot,
510 struct extent_buffer **cow_ret)
515 if (trans->transaction != root->fs_info->running_transaction) {
516 printk(KERN_CRIT "trans %llu running %llu\n",
517 (unsigned long long)trans->transid,
519 root->fs_info->running_transaction->transid);
522 if (trans->transid != root->fs_info->generation) {
523 printk(KERN_CRIT "trans %llu running %llu\n",
524 (unsigned long long)trans->transid,
525 (unsigned long long)root->fs_info->generation);
529 if (!should_cow_block(trans, root, buf)) {
534 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
537 btrfs_set_lock_blocking(parent);
538 btrfs_set_lock_blocking(buf);
540 ret = __btrfs_cow_block(trans, root, buf, parent,
541 parent_slot, cow_ret, search_start, 0);
546 * helper function for defrag to decide if two blocks pointed to by a
547 * node are actually close by
549 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
551 if (blocknr < other && other - (blocknr + blocksize) < 32768)
553 if (blocknr > other && blocknr - (other + blocksize) < 32768)
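/*
 * Worked example (editor's note, numbers hypothetical): with a 4K
 * blocksize, blocks at bytenr 65536 and 90112 are "close" because the gap
 * between them, 90112 - (65536 + 4096) = 20480 bytes, is under 32768, so
 * close_blocks() returns 1 and btrfs_realloc_node() leaves the block
 * where it is.  A gap of 32768 or more makes it return 0 and the block
 * becomes a candidate for reallocation closer to its neighbours.
 */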
559 * compare two keys in a memcmp fashion
561 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
565 btrfs_disk_key_to_cpu(&k1, disk);
567 return btrfs_comp_cpu_keys(&k1, k2);
571 * same as comp_keys only with two btrfs_key's
573 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
575 if (k1->objectid > k2->objectid)
577 if (k1->objectid < k2->objectid)
579 if (k1->type > k2->type)
581 if (k1->type < k2->type)
583 if (k1->offset > k2->offset)
585 if (k1->offset < k2->offset)
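/*
 * Example (editor's sketch, not from the original source): keys sort by
 * objectid first, then type, then offset, so
 *
 *	{ .objectid = 256, .type = BTRFS_INODE_ITEM_KEY, .offset = 0 }
 *
 * compares as smaller than any key with the same objectid and type but a
 * larger offset, and btrfs_comp_cpu_keys() returns -1 for that ordering,
 * 1 for the reverse, and 0 when all three fields match.
 */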
591 * this is used by the defrag code to go through all the
592 * leaves pointed to by a node and reallocate them so that
593 * disk order is close to key order
595 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
596 struct btrfs_root *root, struct extent_buffer *parent,
597 int start_slot, int cache_only, u64 *last_ret,
598 struct btrfs_key *progress)
600 struct extent_buffer *cur;
603 u64 search_start = *last_ret;
613 int progress_passed = 0;
614 struct btrfs_disk_key disk_key;
616 parent_level = btrfs_header_level(parent);
617 if (cache_only && parent_level != 1)
620 if (trans->transaction != root->fs_info->running_transaction)
622 if (trans->transid != root->fs_info->generation)
625 parent_nritems = btrfs_header_nritems(parent);
626 blocksize = btrfs_level_size(root, parent_level - 1);
627 end_slot = parent_nritems;
629 if (parent_nritems == 1)
632 btrfs_set_lock_blocking(parent);
634 for (i = start_slot; i < end_slot; i++) {
637 if (!parent->map_token) {
638 map_extent_buffer(parent,
639 btrfs_node_key_ptr_offset(i),
640 sizeof(struct btrfs_key_ptr),
641 &parent->map_token, &parent->kaddr,
642 &parent->map_start, &parent->map_len,
645 btrfs_node_key(parent, &disk_key, i);
646 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
650 blocknr = btrfs_node_blockptr(parent, i);
651 gen = btrfs_node_ptr_generation(parent, i);
653 last_block = blocknr;
656 other = btrfs_node_blockptr(parent, i - 1);
657 close = close_blocks(blocknr, other, blocksize);
659 if (!close && i < end_slot - 2) {
660 other = btrfs_node_blockptr(parent, i + 1);
661 close = close_blocks(blocknr, other, blocksize);
664 last_block = blocknr;
667 if (parent->map_token) {
668 unmap_extent_buffer(parent, parent->map_token,
670 parent->map_token = NULL;
673 cur = btrfs_find_tree_block(root, blocknr, blocksize);
675 uptodate = btrfs_buffer_uptodate(cur, gen);
678 if (!cur || !uptodate) {
680 free_extent_buffer(cur);
684 cur = read_tree_block(root, blocknr,
686 } else if (!uptodate) {
687 btrfs_read_buffer(cur, gen);
690 if (search_start == 0)
691 search_start = last_block;
693 btrfs_tree_lock(cur);
694 btrfs_set_lock_blocking(cur);
695 err = __btrfs_cow_block(trans, root, cur, parent, i,
698 (end_slot - i) * blocksize));
700 btrfs_tree_unlock(cur);
701 free_extent_buffer(cur);
704 search_start = cur->start;
705 last_block = cur->start;
706 *last_ret = search_start;
707 btrfs_tree_unlock(cur);
708 free_extent_buffer(cur);
710 if (parent->map_token) {
711 unmap_extent_buffer(parent, parent->map_token,
713 parent->map_token = NULL;
719 * The leaf data grows from end-to-front in the node.
720 * This returns the offset of the start of the last item,
721 * which is where the leaf data area currently ends
723 static inline unsigned int leaf_data_end(struct btrfs_root *root,
724 struct extent_buffer *leaf)
726 u32 nr = btrfs_header_nritems(leaf);
728 return BTRFS_LEAF_DATA_SIZE(root);
729 return btrfs_item_offset_nr(leaf, nr - 1);
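/*
 * Worked example (editor's addition, sizes hypothetical): with
 * BTRFS_LEAF_DATA_SIZE(root) == 3995 (a 4K leaf), a leaf holding three
 * items with data sizes 100, 50 and 20 bytes stores that data at offsets
 * 3895, 3845 and 3825 respectively, growing backwards from the end of the
 * block, so leaf_data_end() == btrfs_item_offset_nr(leaf, 2) == 3825.
 * An empty leaf reports leaf_data_end() == BTRFS_LEAF_DATA_SIZE(root).
 */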
733 * extra debugging checks to make sure all the keys in a node are
734 * well formed and in the proper order
736 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
739 struct extent_buffer *parent = NULL;
740 struct extent_buffer *node = path->nodes[level];
741 struct btrfs_disk_key parent_key;
742 struct btrfs_disk_key node_key;
745 struct btrfs_key cpukey;
746 u32 nritems = btrfs_header_nritems(node);
748 if (path->nodes[level + 1])
749 parent = path->nodes[level + 1];
751 slot = path->slots[level];
752 BUG_ON(nritems == 0);
754 parent_slot = path->slots[level + 1];
755 btrfs_node_key(parent, &parent_key, parent_slot);
756 btrfs_node_key(node, &node_key, 0);
757 BUG_ON(memcmp(&parent_key, &node_key,
758 sizeof(struct btrfs_disk_key)));
759 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
760 btrfs_header_bytenr(node));
762 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
764 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
765 btrfs_node_key(node, &node_key, slot);
766 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
768 if (slot < nritems - 1) {
769 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
770 btrfs_node_key(node, &node_key, slot);
771 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
777 * extra checking to make sure all the items in a leaf are
778 * well formed and in the proper order
780 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
783 struct extent_buffer *leaf = path->nodes[level];
784 struct extent_buffer *parent = NULL;
786 struct btrfs_key cpukey;
787 struct btrfs_disk_key parent_key;
788 struct btrfs_disk_key leaf_key;
789 int slot = path->slots[0];
791 u32 nritems = btrfs_header_nritems(leaf);
793 if (path->nodes[level + 1])
794 parent = path->nodes[level + 1];
800 parent_slot = path->slots[level + 1];
801 btrfs_node_key(parent, &parent_key, parent_slot);
802 btrfs_item_key(leaf, &leaf_key, 0);
804 BUG_ON(memcmp(&parent_key, &leaf_key,
805 sizeof(struct btrfs_disk_key)));
806 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
807 btrfs_header_bytenr(leaf));
809 if (slot != 0 && slot < nritems - 1) {
810 btrfs_item_key(leaf, &leaf_key, slot);
811 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
812 if (comp_keys(&leaf_key, &cpukey) <= 0) {
813 btrfs_print_leaf(root, leaf);
814 printk(KERN_CRIT "slot %d offset bad key\n", slot);
817 if (btrfs_item_offset_nr(leaf, slot - 1) !=
818 btrfs_item_end_nr(leaf, slot)) {
819 btrfs_print_leaf(root, leaf);
820 printk(KERN_CRIT "slot %d offset bad\n", slot);
824 if (slot < nritems - 1) {
825 btrfs_item_key(leaf, &leaf_key, slot);
826 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
827 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
828 if (btrfs_item_offset_nr(leaf, slot) !=
829 btrfs_item_end_nr(leaf, slot + 1)) {
830 btrfs_print_leaf(root, leaf);
831 printk(KERN_CRIT "slot %d offset bad\n", slot);
835 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
836 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
840 static noinline int check_block(struct btrfs_root *root,
841 struct btrfs_path *path, int level)
845 return check_leaf(root, path, level);
846 return check_node(root, path, level);
850 * search for key in the extent_buffer. The items start at offset p,
851 * and they are item_size apart. There are 'max' items in p.
853 * the slot in the array is returned via slot, and it points to
854 * the place where you would insert key if it is not found in
857 * slot may point to max if the key is bigger than all of the keys
859 static noinline int generic_bin_search(struct extent_buffer *eb,
861 int item_size, struct btrfs_key *key,
868 struct btrfs_disk_key *tmp = NULL;
869 struct btrfs_disk_key unaligned;
870 unsigned long offset;
871 char *map_token = NULL;
873 unsigned long map_start = 0;
874 unsigned long map_len = 0;
878 mid = (low + high) / 2;
879 offset = p + mid * item_size;
881 if (!map_token || offset < map_start ||
882 (offset + sizeof(struct btrfs_disk_key)) >
883 map_start + map_len) {
885 unmap_extent_buffer(eb, map_token, KM_USER0);
889 err = map_private_extent_buffer(eb, offset,
890 sizeof(struct btrfs_disk_key),
892 &map_start, &map_len, KM_USER0);
895 tmp = (struct btrfs_disk_key *)(kaddr + offset -
898 read_extent_buffer(eb, &unaligned,
899 offset, sizeof(unaligned));
904 tmp = (struct btrfs_disk_key *)(kaddr + offset -
907 ret = comp_keys(tmp, key);
916 unmap_extent_buffer(eb, map_token, KM_USER0);
922 unmap_extent_buffer(eb, map_token, KM_USER0);
927 * simple bin_search frontend that does the right thing for
930 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
931 int level, int *slot)
934 return generic_bin_search(eb,
935 offsetof(struct btrfs_leaf, items),
936 sizeof(struct btrfs_item),
937 key, btrfs_header_nritems(eb),
940 return generic_bin_search(eb,
941 offsetof(struct btrfs_node, ptrs),
942 sizeof(struct btrfs_key_ptr),
943 key, btrfs_header_nritems(eb),
949 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
950 int level, int *slot)
952 return bin_search(eb, key, level, slot);
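/*
 * Interpretation sketch (editor's note): both generic_bin_search() and
 * this wrapper return 0 with *slot set to the matching index when the key
 * is present, and 1 with *slot set to the insertion point when it is not.
 * For example, in a node whose three keys have objectids 10, 20 and 30
 * (same type/offset), searching for objectid 20 gives ret == 0 and
 * *slot == 1, while searching for objectid 25 gives ret == 1 and
 * *slot == 2, the slot the missing key would occupy.
 */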
955 static void root_add_used(struct btrfs_root *root, u32 size)
957 spin_lock(&root->accounting_lock);
958 btrfs_set_root_used(&root->root_item,
959 btrfs_root_used(&root->root_item) + size);
960 spin_unlock(&root->accounting_lock);
963 static void root_sub_used(struct btrfs_root *root, u32 size)
965 spin_lock(&root->accounting_lock);
966 btrfs_set_root_used(&root->root_item,
967 btrfs_root_used(&root->root_item) - size);
968 spin_unlock(&root->accounting_lock);
971 /* given a node and slot number, this reads the block it points to. The
972 * extent buffer is returned with a reference taken (but unlocked).
973 * NULL is returned on error.
975 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
976 struct extent_buffer *parent, int slot)
978 int level = btrfs_header_level(parent);
981 if (slot >= btrfs_header_nritems(parent))
986 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
987 btrfs_level_size(root, level - 1),
988 btrfs_node_ptr_generation(parent, slot));
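/*
 * Usage sketch (editor's addition, mirroring the pattern balance_level()
 * uses below): callers must check for NULL, take the tree lock
 * themselves, and drop both the lock and the reference when finished.
 * The -EIO return is a hypothetical error path.
 *
 *	struct extent_buffer *child;
 *
 *	child = read_node_slot(root, parent, slot);
 *	if (!child)
 *		return -EIO;
 *	btrfs_tree_lock(child);
 *	btrfs_set_lock_blocking(child);
 *	... use child ...
 *	btrfs_tree_unlock(child);
 *	free_extent_buffer(child);
 */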
992 * node level balancing, used to make sure nodes are in proper order for
993 * item deletion. We balance from the top down, so we have to make sure
994 * that a deletion won't leave a node completely empty later on.
996 static noinline int balance_level(struct btrfs_trans_handle *trans,
997 struct btrfs_root *root,
998 struct btrfs_path *path, int level)
1000 struct extent_buffer *right = NULL;
1001 struct extent_buffer *mid;
1002 struct extent_buffer *left = NULL;
1003 struct extent_buffer *parent = NULL;
1007 int orig_slot = path->slots[level];
1008 int err_on_enospc = 0;
1014 mid = path->nodes[level];
1016 WARN_ON(!path->locks[level]);
1017 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1019 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1021 if (level < BTRFS_MAX_LEVEL - 1)
1022 parent = path->nodes[level + 1];
1023 pslot = path->slots[level + 1];
1026 * deal with the case where there is only one pointer in the root
1027 * by promoting the node below to a root
1030 struct extent_buffer *child;
1032 if (btrfs_header_nritems(mid) != 1)
1035 /* promote the child to a root */
1036 child = read_node_slot(root, mid, 0);
1038 btrfs_tree_lock(child);
1039 btrfs_set_lock_blocking(child);
1040 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1042 btrfs_tree_unlock(child);
1043 free_extent_buffer(child);
1047 spin_lock(&root->node_lock);
1049 spin_unlock(&root->node_lock);
1051 add_root_to_dirty_list(root);
1052 btrfs_tree_unlock(child);
1054 path->locks[level] = 0;
1055 path->nodes[level] = NULL;
1056 clean_tree_block(trans, root, mid);
1057 btrfs_tree_unlock(mid);
1058 /* once for the path */
1059 free_extent_buffer(mid);
1061 root_sub_used(root, mid->len);
1062 btrfs_free_tree_block(trans, root, mid, 0, 1);
1063 /* once for the root ptr */
1064 free_extent_buffer(mid);
1067 if (btrfs_header_nritems(mid) >
1068 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1071 if (btrfs_header_nritems(mid) < 2)
1074 left = read_node_slot(root, parent, pslot - 1);
1076 btrfs_tree_lock(left);
1077 btrfs_set_lock_blocking(left);
1078 wret = btrfs_cow_block(trans, root, left,
1079 parent, pslot - 1, &left);
1085 right = read_node_slot(root, parent, pslot + 1);
1087 btrfs_tree_lock(right);
1088 btrfs_set_lock_blocking(right);
1089 wret = btrfs_cow_block(trans, root, right,
1090 parent, pslot + 1, &right);
1097 /* first, try to make some room in the middle buffer */
1099 orig_slot += btrfs_header_nritems(left);
1100 wret = push_node_left(trans, root, left, mid, 1);
1103 if (btrfs_header_nritems(mid) < 2)
1108 * then try to empty the right most buffer into the middle
1111 wret = push_node_left(trans, root, mid, right, 1);
1112 if (wret < 0 && wret != -ENOSPC)
1114 if (btrfs_header_nritems(right) == 0) {
1115 clean_tree_block(trans, root, right);
1116 btrfs_tree_unlock(right);
1117 wret = del_ptr(trans, root, path, level + 1, pslot +
1121 root_sub_used(root, right->len);
1122 btrfs_free_tree_block(trans, root, right, 0, 1);
1123 free_extent_buffer(right);
1126 struct btrfs_disk_key right_key;
1127 btrfs_node_key(right, &right_key, 0);
1128 btrfs_set_node_key(parent, &right_key, pslot + 1);
1129 btrfs_mark_buffer_dirty(parent);
1132 if (btrfs_header_nritems(mid) == 1) {
1134 * we're not allowed to leave a node with one item in the
1135 * tree during a delete. A deletion from lower in the tree
1136 * could try to delete the only pointer in this node.
1137 * So, pull some keys from the left.
1138 * There has to be a left pointer at this point because
1139 * otherwise we would have pulled some pointers from the
1143 wret = balance_node_right(trans, root, mid, left);
1149 wret = push_node_left(trans, root, left, mid, 1);
1155 if (btrfs_header_nritems(mid) == 0) {
1156 clean_tree_block(trans, root, mid);
1157 btrfs_tree_unlock(mid);
1158 wret = del_ptr(trans, root, path, level + 1, pslot);
1161 root_sub_used(root, mid->len);
1162 btrfs_free_tree_block(trans, root, mid, 0, 1);
1163 free_extent_buffer(mid);
1166 /* update the parent key to reflect our changes */
1167 struct btrfs_disk_key mid_key;
1168 btrfs_node_key(mid, &mid_key, 0);
1169 btrfs_set_node_key(parent, &mid_key, pslot);
1170 btrfs_mark_buffer_dirty(parent);
1173 /* update the path */
1175 if (btrfs_header_nritems(left) > orig_slot) {
1176 extent_buffer_get(left);
1177 /* left was locked after cow */
1178 path->nodes[level] = left;
1179 path->slots[level + 1] -= 1;
1180 path->slots[level] = orig_slot;
1182 btrfs_tree_unlock(mid);
1183 free_extent_buffer(mid);
1186 orig_slot -= btrfs_header_nritems(left);
1187 path->slots[level] = orig_slot;
1190 /* double check we haven't messed things up */
1191 check_block(root, path, level);
1193 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1197 btrfs_tree_unlock(right);
1198 free_extent_buffer(right);
1201 if (path->nodes[level] != left)
1202 btrfs_tree_unlock(left);
1203 free_extent_buffer(left);
1208 /* Node balancing for insertion. Here we only split or push nodes around
1209 * when they are completely full. This is also done top down, so we
1210 * have to be pessimistic.
1212 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1213 struct btrfs_root *root,
1214 struct btrfs_path *path, int level)
1216 struct extent_buffer *right = NULL;
1217 struct extent_buffer *mid;
1218 struct extent_buffer *left = NULL;
1219 struct extent_buffer *parent = NULL;
1223 int orig_slot = path->slots[level];
1229 mid = path->nodes[level];
1230 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1231 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1233 if (level < BTRFS_MAX_LEVEL - 1)
1234 parent = path->nodes[level + 1];
1235 pslot = path->slots[level + 1];
1240 left = read_node_slot(root, parent, pslot - 1);
1242 /* first, try to make some room in the middle buffer */
1246 btrfs_tree_lock(left);
1247 btrfs_set_lock_blocking(left);
1249 left_nr = btrfs_header_nritems(left);
1250 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1253 ret = btrfs_cow_block(trans, root, left, parent,
1258 wret = push_node_left(trans, root,
1265 struct btrfs_disk_key disk_key;
1266 orig_slot += left_nr;
1267 btrfs_node_key(mid, &disk_key, 0);
1268 btrfs_set_node_key(parent, &disk_key, pslot);
1269 btrfs_mark_buffer_dirty(parent);
1270 if (btrfs_header_nritems(left) > orig_slot) {
1271 path->nodes[level] = left;
1272 path->slots[level + 1] -= 1;
1273 path->slots[level] = orig_slot;
1274 btrfs_tree_unlock(mid);
1275 free_extent_buffer(mid);
1278 btrfs_header_nritems(left);
1279 path->slots[level] = orig_slot;
1280 btrfs_tree_unlock(left);
1281 free_extent_buffer(left);
1285 btrfs_tree_unlock(left);
1286 free_extent_buffer(left);
1288 right = read_node_slot(root, parent, pslot + 1);
1291 * then try to empty the right most buffer into the middle
1296 btrfs_tree_lock(right);
1297 btrfs_set_lock_blocking(right);
1299 right_nr = btrfs_header_nritems(right);
1300 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1303 ret = btrfs_cow_block(trans, root, right,
1309 wret = balance_node_right(trans, root,
1316 struct btrfs_disk_key disk_key;
1318 btrfs_node_key(right, &disk_key, 0);
1319 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1320 btrfs_mark_buffer_dirty(parent);
1322 if (btrfs_header_nritems(mid) <= orig_slot) {
1323 path->nodes[level] = right;
1324 path->slots[level + 1] += 1;
1325 path->slots[level] = orig_slot -
1326 btrfs_header_nritems(mid);
1327 btrfs_tree_unlock(mid);
1328 free_extent_buffer(mid);
1330 btrfs_tree_unlock(right);
1331 free_extent_buffer(right);
1335 btrfs_tree_unlock(right);
1336 free_extent_buffer(right);
1342 * readahead one full node of leaves, finding things that are close
1343 * to the block in 'slot', and triggering ra on them.
1345 static void reada_for_search(struct btrfs_root *root,
1346 struct btrfs_path *path,
1347 int level, int slot, u64 objectid)
1349 struct extent_buffer *node;
1350 struct btrfs_disk_key disk_key;
1355 int direction = path->reada;
1356 struct extent_buffer *eb;
1364 if (!path->nodes[level])
1367 node = path->nodes[level];
1369 search = btrfs_node_blockptr(node, slot);
1370 blocksize = btrfs_level_size(root, level - 1);
1371 eb = btrfs_find_tree_block(root, search, blocksize);
1373 free_extent_buffer(eb);
1379 nritems = btrfs_header_nritems(node);
1382 if (direction < 0) {
1386 } else if (direction > 0) {
1391 if (path->reada < 0 && objectid) {
1392 btrfs_node_key(node, &disk_key, nr);
1393 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1396 search = btrfs_node_blockptr(node, nr);
1397 if ((search <= target && target - search <= 65536) ||
1398 (search > target && search - target <= 65536)) {
1399 readahead_tree_block(root, search, blocksize,
1400 btrfs_node_ptr_generation(node, nr));
1404 if ((nread > 65536 || nscan > 32))
1410 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1413 static noinline int reada_for_balance(struct btrfs_root *root,
1414 struct btrfs_path *path, int level)
1418 struct extent_buffer *parent;
1419 struct extent_buffer *eb;
1426 parent = path->nodes[level + 1];
1430 nritems = btrfs_header_nritems(parent);
1431 slot = path->slots[level + 1];
1432 blocksize = btrfs_level_size(root, level);
1435 block1 = btrfs_node_blockptr(parent, slot - 1);
1436 gen = btrfs_node_ptr_generation(parent, slot - 1);
1437 eb = btrfs_find_tree_block(root, block1, blocksize);
1438 if (eb && btrfs_buffer_uptodate(eb, gen))
1440 free_extent_buffer(eb);
1442 if (slot + 1 < nritems) {
1443 block2 = btrfs_node_blockptr(parent, slot + 1);
1444 gen = btrfs_node_ptr_generation(parent, slot + 1);
1445 eb = btrfs_find_tree_block(root, block2, blocksize);
1446 if (eb && btrfs_buffer_uptodate(eb, gen))
1448 free_extent_buffer(eb);
1450 if (block1 || block2) {
1453 /* release the whole path */
1454 btrfs_release_path(root, path);
1456 /* read the blocks */
1458 readahead_tree_block(root, block1, blocksize, 0);
1460 readahead_tree_block(root, block2, blocksize, 0);
1463 eb = read_tree_block(root, block1, blocksize, 0);
1464 free_extent_buffer(eb);
1467 eb = read_tree_block(root, block2, blocksize, 0);
1468 free_extent_buffer(eb);
1476 * when we walk down the tree, it is usually safe to unlock the higher layers
1477 * in the tree. The exceptions are when our path goes through slot 0, because
1478 * operations on the tree might require changing key pointers higher up in the
1481 * callers might also have set path->keep_locks, which tells this code to keep
1482 * the lock if the path points to the last slot in the block. This is part of
1483 * walking through the tree, and selecting the next slot in the higher block.
1485 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
1486 * if lowest_unlock is 1, level 0 won't be unlocked
1488 static noinline void unlock_up(struct btrfs_path *path, int level,
1492 int skip_level = level;
1494 struct extent_buffer *t;
1496 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1497 if (!path->nodes[i])
1499 if (!path->locks[i])
1501 if (!no_skips && path->slots[i] == 0) {
1505 if (!no_skips && path->keep_locks) {
1508 nritems = btrfs_header_nritems(t);
1509 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1514 if (skip_level < i && i >= lowest_unlock)
1518 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1519 btrfs_tree_unlock(t);
1526 * This releases any locks held in the path starting at level and
1527 * going all the way up to the root.
1529 * btrfs_search_slot will keep the lock held on higher nodes in a few
1530 * corner cases, such as COW of the block at slot zero in the node. This
1531 * ignores those rules, and it should only be called when there are no
1532 * more updates to be done higher up in the tree.
1534 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1538 if (path->keep_locks)
1541 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1542 if (!path->nodes[i])
1544 if (!path->locks[i])
1546 btrfs_tree_unlock(path->nodes[i]);
1552 * helper function for btrfs_search_slot. The goal is to find a block
1553 * in cache without setting the path to blocking. If we find the block
1554 * we return zero and the path is unchanged.
1556 * If we can't find the block, we set the path blocking and do some
1557 * reada. -EAGAIN is returned and the search must be repeated.
1560 read_block_for_search(struct btrfs_trans_handle *trans,
1561 struct btrfs_root *root, struct btrfs_path *p,
1562 struct extent_buffer **eb_ret, int level, int slot,
1563 struct btrfs_key *key)
1568 struct extent_buffer *b = *eb_ret;
1569 struct extent_buffer *tmp;
1572 blocknr = btrfs_node_blockptr(b, slot);
1573 gen = btrfs_node_ptr_generation(b, slot);
1574 blocksize = btrfs_level_size(root, level - 1);
1576 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1577 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1579 * we found an up to date block without sleeping, return
1587 * reduce lock contention at high levels
1588 * of the btree by dropping locks before
1589 * we read. Don't release the lock on the current
1590 * level because we need to walk this node to figure
1591 * out which blocks to read.
1593 btrfs_unlock_up_safe(p, level + 1);
1594 btrfs_set_path_blocking(p);
1597 free_extent_buffer(tmp);
1599 reada_for_search(root, p, level, slot, key->objectid);
1601 btrfs_release_path(NULL, p);
1604 tmp = read_tree_block(root, blocknr, blocksize, gen);
1607 * If the read above didn't mark this buffer up to date,
1608 * it will never end up being up to date. Set ret to EIO now
1609 * and give up so that our caller doesn't loop forever
1612 if (!btrfs_buffer_uptodate(tmp, 0))
1614 free_extent_buffer(tmp);
1620 * helper function for btrfs_search_slot. This does all of the checks
1621 * for node-level blocks and does any balancing required based on
1624 * If no extra work was required, zero is returned. If we had to
1625 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1629 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1630 struct btrfs_root *root, struct btrfs_path *p,
1631 struct extent_buffer *b, int level, int ins_len)
1634 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1635 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1638 sret = reada_for_balance(root, p, level);
1642 btrfs_set_path_blocking(p);
1643 sret = split_node(trans, root, p, level);
1644 btrfs_clear_path_blocking(p, NULL);
1651 b = p->nodes[level];
1652 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1653 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
1656 sret = reada_for_balance(root, p, level);
1660 btrfs_set_path_blocking(p);
1661 sret = balance_level(trans, root, p, level);
1662 btrfs_clear_path_blocking(p, NULL);
1668 b = p->nodes[level];
1670 btrfs_release_path(NULL, p);
1673 BUG_ON(btrfs_header_nritems(b) == 1);
1684 * look for key in the tree. path is filled in with nodes along the way.
1685 * If key is found, we return zero and you can find the item in the leaf
1686 * level of the path (level 0)
1688 * If the key isn't found, the path points to the slot where it should
1689 * be inserted, and 1 is returned. If there are other errors during the
1690 * search a negative error number is returned.
1692 * if ins_len > 0, nodes and leaves will be split as we walk down the
1693 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1696 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1697 *root, struct btrfs_key *key, struct btrfs_path *p, int
1700 struct extent_buffer *b;
1705 int lowest_unlock = 1;
1706 u8 lowest_level = 0;
1708 lowest_level = p->lowest_level;
1709 WARN_ON(lowest_level && ins_len > 0);
1710 WARN_ON(p->nodes[0] != NULL);
1716 if (p->search_commit_root) {
1717 b = root->commit_root;
1718 extent_buffer_get(b);
1719 if (!p->skip_locking)
1722 if (p->skip_locking)
1723 b = btrfs_root_node(root);
1725 b = btrfs_lock_root_node(root);
1729 level = btrfs_header_level(b);
1732 * setup the path here so we can release it under lock
1733 * contention with the cow code
1735 p->nodes[level] = b;
1736 if (!p->skip_locking)
1737 p->locks[level] = 1;
1741 * if we don't really need to cow this block
1742 * then we don't want to set the path blocking,
1743 * so we test it here
1745 if (!should_cow_block(trans, root, b))
1748 btrfs_set_path_blocking(p);
1750 err = btrfs_cow_block(trans, root, b,
1751 p->nodes[level + 1],
1752 p->slots[level + 1], &b);
1759 BUG_ON(!cow && ins_len);
1760 if (level != btrfs_header_level(b))
1762 level = btrfs_header_level(b);
1764 p->nodes[level] = b;
1765 if (!p->skip_locking)
1766 p->locks[level] = 1;
1768 btrfs_clear_path_blocking(p, NULL);
1771 * we have a lock on b and as long as we aren't changing
1772 * the tree, there is no way for the items in b to change.
1773 * It is safe to drop the lock on our parent before we
1774 * go through the expensive btree search on b.
1776 * If cow is true, then we might be changing slot zero,
1777 * which may require changing the parent. So, we can't
1778 * drop the lock until after we know which slot we're
1782 btrfs_unlock_up_safe(p, level + 1);
1784 ret = check_block(root, p, level);
1790 ret = bin_search(b, key, level, &slot);
1794 if (ret && slot > 0) {
1798 p->slots[level] = slot;
1799 err = setup_nodes_for_search(trans, root, p, b, level,
1807 b = p->nodes[level];
1808 slot = p->slots[level];
1810 unlock_up(p, level, lowest_unlock);
1812 if (level == lowest_level) {
1818 err = read_block_for_search(trans, root, p,
1819 &b, level, slot, key);
1827 if (!p->skip_locking) {
1828 btrfs_clear_path_blocking(p, NULL);
1829 err = btrfs_try_spin_lock(b);
1832 btrfs_set_path_blocking(p);
1834 btrfs_clear_path_blocking(p, b);
1838 p->slots[level] = slot;
1840 btrfs_leaf_free_space(root, b) < ins_len) {
1841 btrfs_set_path_blocking(p);
1842 err = split_leaf(trans, root, key,
1843 p, ins_len, ret == 0);
1844 btrfs_clear_path_blocking(p, NULL);
1852 if (!p->search_for_split)
1853 unlock_up(p, level, lowest_unlock);
1860 * we don't really know what they plan on doing with the path
1861 * from here on, so for now just mark it as blocking
1863 if (!p->leave_spinning)
1864 btrfs_set_path_blocking(p);
1866 btrfs_release_path(root, p);
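/*
 * Usage sketch for the ins_len/cow arguments (editor's addition, values
 * hypothetical): a read-only lookup passes ins_len == 0 and cow == 0; an
 * insertion of data_size bytes passes ins_len == data_size +
 * sizeof(struct btrfs_item) and cow == 1 so blocks are COWed and split on
 * the way down; a deletion passes ins_len == -1 and cow == 1 so underfull
 * nodes are merged.  A return of 0 means the key sits at path->slots[0] in
 * path->nodes[0]; 1 means it was not found and path->slots[0] is where it
 * would be inserted; negative values are errors.
 */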
1871 * adjust the pointers going up the tree, starting at level
1872 * making sure the right key of each node points to 'key'.
1873 * This is used after shifting pointers to the left, so it stops
1874 * fixing up pointers when a given leaf/node is not in slot 0 of the
1877 * If this fails to write a tree block, it returns -1, but continues
1878 * fixing up the blocks in ram so the tree is consistent.
1880 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1881 struct btrfs_root *root, struct btrfs_path *path,
1882 struct btrfs_disk_key *key, int level)
1886 struct extent_buffer *t;
1888 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1889 int tslot = path->slots[i];
1890 if (!path->nodes[i])
1893 btrfs_set_node_key(t, key, tslot);
1894 btrfs_mark_buffer_dirty(path->nodes[i]);
1904 * This function isn't completely safe. It's the caller's responsibility
1905 * to make sure the new key won't break the sort order
1907 int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1908 struct btrfs_root *root, struct btrfs_path *path,
1909 struct btrfs_key *new_key)
1911 struct btrfs_disk_key disk_key;
1912 struct extent_buffer *eb;
1915 eb = path->nodes[0];
1916 slot = path->slots[0];
1918 btrfs_item_key(eb, &disk_key, slot - 1);
1919 if (comp_keys(&disk_key, new_key) >= 0)
1922 if (slot < btrfs_header_nritems(eb) - 1) {
1923 btrfs_item_key(eb, &disk_key, slot + 1);
1924 if (comp_keys(&disk_key, new_key) <= 0)
1928 btrfs_cpu_key_to_disk(&disk_key, new_key);
1929 btrfs_set_item_key(eb, &disk_key, slot);
1930 btrfs_mark_buffer_dirty(eb);
1932 fixup_low_keys(trans, root, path, &disk_key, 1);
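/*
 * Example of the ordering rule above (editor's sketch): if the slot being
 * rewritten holds (258, BTRFS_DIR_ITEM_KEY, 100) and its neighbours hold
 * the same objectid and type with offsets 50 and 200, changing the offset
 * to 150 is safe because the key still sorts between them; changing it to
 * 300 would fail the comp_keys() checks against the next slot.  All of
 * these numbers are hypothetical.
 */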
1937 * try to push data from one node into the next node left in the
1940 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1941 * error, and > 0 if there was no room in the left hand block.
1943 static int push_node_left(struct btrfs_trans_handle *trans,
1944 struct btrfs_root *root, struct extent_buffer *dst,
1945 struct extent_buffer *src, int empty)
1952 src_nritems = btrfs_header_nritems(src);
1953 dst_nritems = btrfs_header_nritems(dst);
1954 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1955 WARN_ON(btrfs_header_generation(src) != trans->transid);
1956 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1958 if (!empty && src_nritems <= 8)
1961 if (push_items <= 0)
1965 push_items = min(src_nritems, push_items);
1966 if (push_items < src_nritems) {
1967 /* leave at least 8 pointers in the node if
1968 * we aren't going to empty it
1970 if (src_nritems - push_items < 8) {
1971 if (push_items <= 8)
1977 push_items = min(src_nritems - 8, push_items);
1979 copy_extent_buffer(dst, src,
1980 btrfs_node_key_ptr_offset(dst_nritems),
1981 btrfs_node_key_ptr_offset(0),
1982 push_items * sizeof(struct btrfs_key_ptr));
1984 if (push_items < src_nritems) {
1985 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1986 btrfs_node_key_ptr_offset(push_items),
1987 (src_nritems - push_items) *
1988 sizeof(struct btrfs_key_ptr));
1990 btrfs_set_header_nritems(src, src_nritems - push_items);
1991 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1992 btrfs_mark_buffer_dirty(src);
1993 btrfs_mark_buffer_dirty(dst);
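/*
 * Worked example (editor's addition, counts hypothetical): with
 * BTRFS_NODEPTRS_PER_BLOCK(root) == 121 (a 4K node), a destination
 * holding 100 pointers and a source holding 30, push_items starts out as
 * 21; that leaves 9 pointers behind, which clears the minimum of 8, so 21
 * key_ptrs are copied and src/dst end up with 9 and 121 pointers.  With
 * empty == 0 and a 25-pointer source, push_items is clamped to
 * min(25 - 8, 21) == 17 so at least 8 pointers always remain in the source.
 */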
1999 * try to push data from one node into the next node right in the
2002 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2003 * error, and > 0 if there was no room in the right hand block.
2005 * this will only push up to 1/2 the contents of the left node over
2007 static int balance_node_right(struct btrfs_trans_handle *trans,
2008 struct btrfs_root *root,
2009 struct extent_buffer *dst,
2010 struct extent_buffer *src)
2018 WARN_ON(btrfs_header_generation(src) != trans->transid);
2019 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2021 src_nritems = btrfs_header_nritems(src);
2022 dst_nritems = btrfs_header_nritems(dst);
2023 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2024 if (push_items <= 0)
2027 if (src_nritems < 4)
2030 max_push = src_nritems / 2 + 1;
2031 /* don't try to empty the node */
2032 if (max_push >= src_nritems)
2035 if (max_push < push_items)
2036 push_items = max_push;
2038 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2039 btrfs_node_key_ptr_offset(0),
2041 sizeof(struct btrfs_key_ptr));
2043 copy_extent_buffer(dst, src,
2044 btrfs_node_key_ptr_offset(0),
2045 btrfs_node_key_ptr_offset(src_nritems - push_items),
2046 push_items * sizeof(struct btrfs_key_ptr));
2048 btrfs_set_header_nritems(src, src_nritems - push_items);
2049 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2051 btrfs_mark_buffer_dirty(src);
2052 btrfs_mark_buffer_dirty(dst);
2058 * helper function to insert a new root level in the tree.
2059 * A new node is allocated, and a single item is inserted to
2060 * point to the existing root
2062 * returns zero on success or < 0 on failure.
2064 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2065 struct btrfs_root *root,
2066 struct btrfs_path *path, int level)
2069 struct extent_buffer *lower;
2070 struct extent_buffer *c;
2071 struct extent_buffer *old;
2072 struct btrfs_disk_key lower_key;
2074 BUG_ON(path->nodes[level]);
2075 BUG_ON(path->nodes[level-1] != root->node);
2077 lower = path->nodes[level-1];
2079 btrfs_item_key(lower, &lower_key, 0);
2081 btrfs_node_key(lower, &lower_key, 0);
2083 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2084 root->root_key.objectid, &lower_key,
2085 level, root->node->start, 0);
2089 root_add_used(root, root->nodesize);
2091 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2092 btrfs_set_header_nritems(c, 1);
2093 btrfs_set_header_level(c, level);
2094 btrfs_set_header_bytenr(c, c->start);
2095 btrfs_set_header_generation(c, trans->transid);
2096 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2097 btrfs_set_header_owner(c, root->root_key.objectid);
2099 write_extent_buffer(c, root->fs_info->fsid,
2100 (unsigned long)btrfs_header_fsid(c),
2103 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2104 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2107 btrfs_set_node_key(c, &lower_key, 0);
2108 btrfs_set_node_blockptr(c, 0, lower->start);
2109 lower_gen = btrfs_header_generation(lower);
2110 WARN_ON(lower_gen != trans->transid);
2112 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2114 btrfs_mark_buffer_dirty(c);
2116 spin_lock(&root->node_lock);
2119 spin_unlock(&root->node_lock);
2121 /* the super has an extra ref to root->node */
2122 free_extent_buffer(old);
2124 add_root_to_dirty_list(root);
2125 extent_buffer_get(c);
2126 path->nodes[level] = c;
2127 path->locks[level] = 1;
2128 path->slots[level] = 0;
2133 * worker function to insert a single pointer in a node.
2134 * the node should have enough room for the pointer already
2136 * slot and level indicate where you want the key to go, and
2137 * blocknr is the block the key points to.
2139 * returns zero on success and < 0 on any error
2141 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2142 *root, struct btrfs_path *path, struct btrfs_disk_key
2143 *key, u64 bytenr, int slot, int level)
2145 struct extent_buffer *lower;
2148 BUG_ON(!path->nodes[level]);
2149 btrfs_assert_tree_locked(path->nodes[level]);
2150 lower = path->nodes[level];
2151 nritems = btrfs_header_nritems(lower);
2152 BUG_ON(slot > nritems);
2153 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
2155 if (slot != nritems) {
2156 memmove_extent_buffer(lower,
2157 btrfs_node_key_ptr_offset(slot + 1),
2158 btrfs_node_key_ptr_offset(slot),
2159 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2161 btrfs_set_node_key(lower, key, slot);
2162 btrfs_set_node_blockptr(lower, slot, bytenr);
2163 WARN_ON(trans->transid == 0);
2164 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2165 btrfs_set_header_nritems(lower, nritems + 1);
2166 btrfs_mark_buffer_dirty(lower);
2171 * split the node at the specified level in path in two.
2172 * The path is corrected to point to the appropriate node after the split
2174 * Before splitting this tries to make some room in the node by pushing
2175 * left and right; if either one works, it returns right away.
2177 * returns 0 on success and < 0 on failure
2179 static noinline int split_node(struct btrfs_trans_handle *trans,
2180 struct btrfs_root *root,
2181 struct btrfs_path *path, int level)
2183 struct extent_buffer *c;
2184 struct extent_buffer *split;
2185 struct btrfs_disk_key disk_key;
2191 c = path->nodes[level];
2192 WARN_ON(btrfs_header_generation(c) != trans->transid);
2193 if (c == root->node) {
2194 /* trying to split the root, let's make a new one */
2195 ret = insert_new_root(trans, root, path, level + 1);
2199 ret = push_nodes_for_insert(trans, root, path, level);
2200 c = path->nodes[level];
2201 if (!ret && btrfs_header_nritems(c) <
2202 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2208 c_nritems = btrfs_header_nritems(c);
2209 mid = (c_nritems + 1) / 2;
2210 btrfs_node_key(c, &disk_key, mid);
2212 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2213 root->root_key.objectid,
2214 &disk_key, level, c->start, 0);
2216 return PTR_ERR(split);
2218 root_add_used(root, root->nodesize);
2220 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2221 btrfs_set_header_level(split, btrfs_header_level(c));
2222 btrfs_set_header_bytenr(split, split->start);
2223 btrfs_set_header_generation(split, trans->transid);
2224 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
2225 btrfs_set_header_owner(split, root->root_key.objectid);
2226 write_extent_buffer(split, root->fs_info->fsid,
2227 (unsigned long)btrfs_header_fsid(split),
2229 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2230 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2234 copy_extent_buffer(split, c,
2235 btrfs_node_key_ptr_offset(0),
2236 btrfs_node_key_ptr_offset(mid),
2237 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2238 btrfs_set_header_nritems(split, c_nritems - mid);
2239 btrfs_set_header_nritems(c, mid);
2242 btrfs_mark_buffer_dirty(c);
2243 btrfs_mark_buffer_dirty(split);
2245 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2246 path->slots[level + 1] + 1,
2251 if (path->slots[level] >= mid) {
2252 path->slots[level] -= mid;
2253 btrfs_tree_unlock(c);
2254 free_extent_buffer(c);
2255 path->nodes[level] = split;
2256 path->slots[level + 1] += 1;
2258 btrfs_tree_unlock(split);
2259 free_extent_buffer(split);
2265 * how many bytes are required to store the items in a leaf. start
2266 * and nr indicate which items in the leaf to check. This totals up the
2267 * space used both by the item structs and the item data
2269 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2272 int nritems = btrfs_header_nritems(l);
2273 int end = min(nritems, start + nr) - 1;
2277 data_len = btrfs_item_end_nr(l, start);
2278 data_len = data_len - btrfs_item_offset_nr(l, end);
2279 data_len += sizeof(struct btrfs_item) * nr;
2280 WARN_ON(data_len < 0);
2285 * The space between the end of the leaf items and
2286 * the start of the leaf data. IOW, how much room
2287 * the leaf has left for both items and data
2289 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2290 struct extent_buffer *leaf)
2292 int nritems = btrfs_header_nritems(leaf);
2294 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2296 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2297 "used %d nritems %d\n",
2298 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2299 leaf_space_used(leaf, 0, nritems), nritems);
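/*
 * Worked example (editor's addition, sizes hypothetical): on a 4K leaf,
 * BTRFS_LEAF_DATA_SIZE(root) is 3995 bytes.  A leaf carrying 10 items
 * with 500 bytes of item data in total uses 10 * sizeof(struct
 * btrfs_item) == 250 bytes of item headers plus the 500 data bytes, so
 * btrfs_leaf_free_space() reports 3995 - 750 = 3245 bytes left for new
 * items and their data.
 */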
2304 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2305 struct btrfs_root *root,
2306 struct btrfs_path *path,
2307 int data_size, int empty,
2308 struct extent_buffer *right,
2309 int free_space, u32 left_nritems)
2311 struct extent_buffer *left = path->nodes[0];
2312 struct extent_buffer *upper = path->nodes[1];
2313 struct btrfs_disk_key disk_key;
2318 struct btrfs_item *item;
2329 if (path->slots[0] >= left_nritems)
2330 push_space += data_size;
2332 slot = path->slots[1];
2333 i = left_nritems - 1;
2335 item = btrfs_item_nr(left, i);
2337 if (!empty && push_items > 0) {
2338 if (path->slots[0] > i)
2340 if (path->slots[0] == i) {
2341 int space = btrfs_leaf_free_space(root, left);
2342 if (space + push_space * 2 > free_space)
2347 if (path->slots[0] == i)
2348 push_space += data_size;
2350 if (!left->map_token) {
2351 map_extent_buffer(left, (unsigned long)item,
2352 sizeof(struct btrfs_item),
2353 &left->map_token, &left->kaddr,
2354 &left->map_start, &left->map_len,
2358 this_item_size = btrfs_item_size(left, item);
2359 if (this_item_size + sizeof(*item) + push_space > free_space)
2363 push_space += this_item_size + sizeof(*item);
2368 if (left->map_token) {
2369 unmap_extent_buffer(left, left->map_token, KM_USER1);
2370 left->map_token = NULL;
2373 if (push_items == 0)
2376 if (!empty && push_items == left_nritems)
2379 /* push left to right */
2380 right_nritems = btrfs_header_nritems(right);
2382 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2383 push_space -= leaf_data_end(root, left);
2385 /* make room in the right data area */
2386 data_end = leaf_data_end(root, right);
2387 memmove_extent_buffer(right,
2388 btrfs_leaf_data(right) + data_end - push_space,
2389 btrfs_leaf_data(right) + data_end,
2390 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2392 /* copy from the left data area */
2393 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2394 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2395 btrfs_leaf_data(left) + leaf_data_end(root, left),
2398 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2399 btrfs_item_nr_offset(0),
2400 right_nritems * sizeof(struct btrfs_item));
2402 /* copy the items from left to right */
2403 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2404 btrfs_item_nr_offset(left_nritems - push_items),
2405 push_items * sizeof(struct btrfs_item));
2407 /* update the item pointers */
2408 right_nritems += push_items;
2409 btrfs_set_header_nritems(right, right_nritems);
2410 push_space = BTRFS_LEAF_DATA_SIZE(root);
2411 for (i = 0; i < right_nritems; i++) {
2412 item = btrfs_item_nr(right, i);
2413 if (!right->map_token) {
2414 map_extent_buffer(right, (unsigned long)item,
2415 sizeof(struct btrfs_item),
2416 &right->map_token, &right->kaddr,
2417 &right->map_start, &right->map_len,
2420 push_space -= btrfs_item_size(right, item);
2421 btrfs_set_item_offset(right, item, push_space);
2424 if (right->map_token) {
2425 unmap_extent_buffer(right, right->map_token, KM_USER1);
2426 right->map_token = NULL;
2428 left_nritems -= push_items;
2429 btrfs_set_header_nritems(left, left_nritems);
2432 btrfs_mark_buffer_dirty(left);
2434 clean_tree_block(trans, root, left);
2436 btrfs_mark_buffer_dirty(right);
2438 btrfs_item_key(right, &disk_key, 0);
2439 btrfs_set_node_key(upper, &disk_key, slot + 1);
2440 btrfs_mark_buffer_dirty(upper);
2442 /* then fixup the leaf pointer in the path */
2443 if (path->slots[0] >= left_nritems) {
2444 path->slots[0] -= left_nritems;
2445 if (btrfs_header_nritems(path->nodes[0]) == 0)
2446 clean_tree_block(trans, root, path->nodes[0]);
2447 btrfs_tree_unlock(path->nodes[0]);
2448 free_extent_buffer(path->nodes[0]);
2449 path->nodes[0] = right;
2450 path->slots[1] += 1;
2452 btrfs_tree_unlock(right);
2453 free_extent_buffer(right);
2458 btrfs_tree_unlock(right);
2459 free_extent_buffer(right);
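/*
 * A minimal illustrative sketch: the push helpers above move item headers
 * and item data separately.  The struct btrfs_item headers sit at the front
 * of the leaf and the item data is packed toward the end, so slot N's data
 * occupies [offset, offset + size) within the leaf data area.  The
 * example_ helper below is hypothetical, not part of the btrfs interfaces.
 */
static inline void example_item_data_range(struct extent_buffer *leaf,
					   int slot, u32 *start, u32 *end)
{
	*start = btrfs_item_offset_nr(leaf, slot);
	/* btrfs_item_end_nr() is simply offset + size for this slot */
	*end = btrfs_item_end_nr(leaf, slot);
}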
2464 * push some data in the path leaf to the right, trying to free up at
2465 * least data_size bytes.
2467 * returns 1 if the push failed because the other node didn't have enough
2468 * room, 0 if everything worked out and < 0 if there were major errors.
2470 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2471 *root, struct btrfs_path *path, int data_size,
2474 struct extent_buffer *left = path->nodes[0];
2475 struct extent_buffer *right;
2476 struct extent_buffer *upper;
2482 if (!path->nodes[1])
2485 slot = path->slots[1];
2486 upper = path->nodes[1];
2487 if (slot >= btrfs_header_nritems(upper) - 1)
2490 btrfs_assert_tree_locked(path->nodes[1]);
2492 right = read_node_slot(root, upper, slot + 1);
2493 btrfs_tree_lock(right);
2494 btrfs_set_lock_blocking(right);
2496 free_space = btrfs_leaf_free_space(root, right);
2497 if (free_space < data_size)
2500 /* cow and double check */
2501 ret = btrfs_cow_block(trans, root, right, upper,
2506 free_space = btrfs_leaf_free_space(root, right);
2507 if (free_space < data_size)
2510 left_nritems = btrfs_header_nritems(left);
2511 if (left_nritems == 0)
2514 return __push_leaf_right(trans, root, path, data_size, empty,
2515 right, free_space, left_nritems);
2517 btrfs_tree_unlock(right);
2518 free_extent_buffer(right);
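/*
 * A minimal illustrative sketch: btrfs_leaf_free_space() above reports
 * BTRFS_LEAF_DATA_SIZE(root) minus leaf_space_used(), and every insertion
 * costs its data size plus one struct btrfs_item header.  This is the fit
 * test the push and split paths keep repeating; the example_ name below is
 * hypothetical, not part of the btrfs interfaces.
 */
static inline int example_item_fits(struct btrfs_root *root,
				    struct extent_buffer *leaf, u32 data_size)
{
	return btrfs_leaf_free_space(root, leaf) >=
	       (int)(data_size + sizeof(struct btrfs_item));
}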
2523 * push some data in the path leaf to the left, trying to free up at
2524 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2526 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2527 struct btrfs_root *root,
2528 struct btrfs_path *path, int data_size,
2529 int empty, struct extent_buffer *left,
2530 int free_space, int right_nritems)
2532 struct btrfs_disk_key disk_key;
2533 struct extent_buffer *right = path->nodes[0];
2538 struct btrfs_item *item;
2539 u32 old_left_nritems;
2544 u32 old_left_item_size;
2546 slot = path->slots[1];
2551 nr = right_nritems - 1;
2553 for (i = 0; i < nr; i++) {
2554 item = btrfs_item_nr(right, i);
2555 if (!right->map_token) {
2556 map_extent_buffer(right, (unsigned long)item,
2557 sizeof(struct btrfs_item),
2558 &right->map_token, &right->kaddr,
2559 &right->map_start, &right->map_len,
2563 if (!empty && push_items > 0) {
2564 if (path->slots[0] < i)
2566 if (path->slots[0] == i) {
2567 int space = btrfs_leaf_free_space(root, right);
2568 if (space + push_space * 2 > free_space)
2573 if (path->slots[0] == i)
2574 push_space += data_size;
2576 this_item_size = btrfs_item_size(right, item);
2577 if (this_item_size + sizeof(*item) + push_space > free_space)
2581 push_space += this_item_size + sizeof(*item);
2584 if (right->map_token) {
2585 unmap_extent_buffer(right, right->map_token, KM_USER1);
2586 right->map_token = NULL;
2589 if (push_items == 0) {
2593 if (!empty && push_items == btrfs_header_nritems(right))
2596 /* push data from right to left */
2597 copy_extent_buffer(left, right,
2598 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2599 btrfs_item_nr_offset(0),
2600 push_items * sizeof(struct btrfs_item));
2602 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2603 btrfs_item_offset_nr(right, push_items - 1);
2605 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2606 leaf_data_end(root, left) - push_space,
2607 btrfs_leaf_data(right) +
2608 btrfs_item_offset_nr(right, push_items - 1),
2610 old_left_nritems = btrfs_header_nritems(left);
2611 BUG_ON(old_left_nritems <= 0);
2613 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2614 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2617 item = btrfs_item_nr(left, i);
2618 if (!left->map_token) {
2619 map_extent_buffer(left, (unsigned long)item,
2620 sizeof(struct btrfs_item),
2621 &left->map_token, &left->kaddr,
2622 &left->map_start, &left->map_len,
2626 ioff = btrfs_item_offset(left, item);
2627 btrfs_set_item_offset(left, item,
2628 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2630 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2631 if (left->map_token) {
2632 unmap_extent_buffer(left, left->map_token, KM_USER1);
2633 left->map_token = NULL;
2636 /* fixup right node */
2637 if (push_items > right_nritems) {
2638 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2643 if (push_items < right_nritems) {
2644 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2645 leaf_data_end(root, right);
2646 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2647 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2648 btrfs_leaf_data(right) +
2649 leaf_data_end(root, right), push_space);
2651 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2652 btrfs_item_nr_offset(push_items),
2653 (btrfs_header_nritems(right) - push_items) *
2654 sizeof(struct btrfs_item));
2656 right_nritems -= push_items;
2657 btrfs_set_header_nritems(right, right_nritems);
2658 push_space = BTRFS_LEAF_DATA_SIZE(root);
2659 for (i = 0; i < right_nritems; i++) {
2660 item = btrfs_item_nr(right, i);
2662 if (!right->map_token) {
2663 map_extent_buffer(right, (unsigned long)item,
2664 sizeof(struct btrfs_item),
2665 &right->map_token, &right->kaddr,
2666 &right->map_start, &right->map_len,
2670 push_space = push_space - btrfs_item_size(right, item);
2671 btrfs_set_item_offset(right, item, push_space);
2673 if (right->map_token) {
2674 unmap_extent_buffer(right, right->map_token, KM_USER1);
2675 right->map_token = NULL;
2678 btrfs_mark_buffer_dirty(left);
2680 btrfs_mark_buffer_dirty(right);
2682 clean_tree_block(trans, root, right);
2684 btrfs_item_key(right, &disk_key, 0);
2685 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2689 /* then fixup the leaf pointer in the path */
2690 if (path->slots[0] < push_items) {
2691 path->slots[0] += old_left_nritems;
2692 btrfs_tree_unlock(path->nodes[0]);
2693 free_extent_buffer(path->nodes[0]);
2694 path->nodes[0] = left;
2695 path->slots[1] -= 1;
2697 btrfs_tree_unlock(left);
2698 free_extent_buffer(left);
2699 path->slots[0] -= push_items;
2701 BUG_ON(path->slots[0] < 0);
2704 btrfs_tree_unlock(left);
2705 free_extent_buffer(left);
2710 * push some data in the path leaf to the left, trying to free up at
2711 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2713 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2714 *root, struct btrfs_path *path, int data_size,
2717 struct extent_buffer *right = path->nodes[0];
2718 struct extent_buffer *left;
2724 slot = path->slots[1];
2727 if (!path->nodes[1])
2730 right_nritems = btrfs_header_nritems(right);
2731 if (right_nritems == 0)
2734 btrfs_assert_tree_locked(path->nodes[1]);
2736 left = read_node_slot(root, path->nodes[1], slot - 1);
2737 btrfs_tree_lock(left);
2738 btrfs_set_lock_blocking(left);
2740 free_space = btrfs_leaf_free_space(root, left);
2741 if (free_space < data_size) {
2746 /* cow and double check */
2747 ret = btrfs_cow_block(trans, root, left,
2748 path->nodes[1], slot - 1, &left);
2750 /* we hit -ENOSPC, but it isn't fatal here */
2755 free_space = btrfs_leaf_free_space(root, left);
2756 if (free_space < data_size) {
2761 return __push_leaf_left(trans, root, path, data_size,
2762 empty, left, free_space, right_nritems);
2764 btrfs_tree_unlock(left);
2765 free_extent_buffer(left);
2770 * this is the second half of a leaf split: move the items from 'mid' onward
2771 * out of leaf 'l' into the new leaf 'right', and add 'right' to the parent.
2773 * returns 0 if all went well and < 0 on failure.
2775 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2776 struct btrfs_root *root,
2777 struct btrfs_path *path,
2778 struct extent_buffer *l,
2779 struct extent_buffer *right,
2780 int slot, int mid, int nritems)
2787 struct btrfs_disk_key disk_key;
2789 nritems = nritems - mid;
2790 btrfs_set_header_nritems(right, nritems);
2791 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2793 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2794 btrfs_item_nr_offset(mid),
2795 nritems * sizeof(struct btrfs_item));
2797 copy_extent_buffer(right, l,
2798 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2799 data_copy_size, btrfs_leaf_data(l) +
2800 leaf_data_end(root, l), data_copy_size);
2802 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2803 btrfs_item_end_nr(l, mid);
2805 for (i = 0; i < nritems; i++) {
2806 struct btrfs_item *item = btrfs_item_nr(right, i);
2809 if (!right->map_token) {
2810 map_extent_buffer(right, (unsigned long)item,
2811 sizeof(struct btrfs_item),
2812 &right->map_token, &right->kaddr,
2813 &right->map_start, &right->map_len,
2817 ioff = btrfs_item_offset(right, item);
2818 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2821 if (right->map_token) {
2822 unmap_extent_buffer(right, right->map_token, KM_USER1);
2823 right->map_token = NULL;
2826 btrfs_set_header_nritems(l, mid);
2828 btrfs_item_key(right, &disk_key, 0);
2829 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2830 path->slots[1] + 1, 1);
2834 btrfs_mark_buffer_dirty(right);
2835 btrfs_mark_buffer_dirty(l);
2836 BUG_ON(path->slots[0] != slot);
2839 btrfs_tree_unlock(path->nodes[0]);
2840 free_extent_buffer(path->nodes[0]);
2841 path->nodes[0] = right;
2842 path->slots[0] -= mid;
2843 path->slots[1] += 1;
2845 btrfs_tree_unlock(right);
2846 free_extent_buffer(right);
2849 BUG_ON(path->slots[0] < 0);
2855 * split the path's leaf in two, making sure there is at least data_size
2856 * available for the resulting leaf level of the path.
2858 * returns 0 if all went well and < 0 on failure.
2860 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2861 struct btrfs_root *root,
2862 struct btrfs_key *ins_key,
2863 struct btrfs_path *path, int data_size,
2866 struct btrfs_disk_key disk_key;
2867 struct extent_buffer *l;
2871 struct extent_buffer *right;
2875 int num_doubles = 0;
2878 slot = path->slots[0];
2879 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2880 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2883 /* first try to make some room by pushing left and right */
2884 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2885 wret = push_leaf_right(trans, root, path, data_size, 0);
2889 wret = push_leaf_left(trans, root, path, data_size, 0);
2895 /* did the pushes work? */
2896 if (btrfs_leaf_free_space(root, l) >= data_size)
2900 if (!path->nodes[1]) {
2901 ret = insert_new_root(trans, root, path, 1);
2908 slot = path->slots[0];
2909 nritems = btrfs_header_nritems(l);
2910 mid = (nritems + 1) / 2;
2914 leaf_space_used(l, mid, nritems - mid) + data_size >
2915 BTRFS_LEAF_DATA_SIZE(root)) {
2916 if (slot >= nritems) {
2920 if (mid != nritems &&
2921 leaf_space_used(l, mid, nritems - mid) +
2922 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2928 if (leaf_space_used(l, 0, mid) + data_size >
2929 BTRFS_LEAF_DATA_SIZE(root)) {
2930 if (!extend && data_size && slot == 0) {
2932 } else if ((extend || !data_size) && slot == 0) {
2936 if (mid != nritems &&
2937 leaf_space_used(l, mid, nritems - mid) +
2938 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2946 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2948 btrfs_item_key(l, &disk_key, mid);
2950 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2951 root->root_key.objectid,
2952 &disk_key, 0, l->start, 0);
2954 return PTR_ERR(right);
2956 root_add_used(root, root->leafsize);
2958 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2959 btrfs_set_header_bytenr(right, right->start);
2960 btrfs_set_header_generation(right, trans->transid);
2961 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2962 btrfs_set_header_owner(right, root->root_key.objectid);
2963 btrfs_set_header_level(right, 0);
2964 write_extent_buffer(right, root->fs_info->fsid,
2965 (unsigned long)btrfs_header_fsid(right),
2968 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2969 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2974 btrfs_set_header_nritems(right, 0);
2975 wret = insert_ptr(trans, root, path,
2976 &disk_key, right->start,
2977 path->slots[1] + 1, 1);
2981 btrfs_tree_unlock(path->nodes[0]);
2982 free_extent_buffer(path->nodes[0]);
2983 path->nodes[0] = right;
2985 path->slots[1] += 1;
2987 btrfs_set_header_nritems(right, 0);
2988 wret = insert_ptr(trans, root, path,
2994 btrfs_tree_unlock(path->nodes[0]);
2995 free_extent_buffer(path->nodes[0]);
2996 path->nodes[0] = right;
2998 if (path->slots[1] == 0) {
2999 wret = fixup_low_keys(trans, root,
3000 path, &disk_key, 1);
3005 btrfs_mark_buffer_dirty(right);
3009 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3013 BUG_ON(num_doubles != 0);
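/*
 * A minimal illustrative sketch: split_leaf() is not called directly by
 * users of the tree, it runs from btrfs_search_slot() when the caller asks
 * for ins_len bytes of room.  The example_ name below is hypothetical, not
 * part of the btrfs interfaces.
 */
static inline int example_search_with_room(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   struct btrfs_key *key,
					   u32 data_size)
{
	/*
	 * ins_len > 0 with cow == 1 makes the search COW blocks on the way
	 * down and push or split the target leaf if it is too full.
	 */
	return btrfs_search_slot(trans, root, key, path,
				 (int)(data_size + sizeof(struct btrfs_item)),
				 1);
}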
3021 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3022 struct btrfs_root *root,
3023 struct btrfs_path *path, int ins_len)
3025 struct btrfs_key key;
3026 struct extent_buffer *leaf;
3027 struct btrfs_file_extent_item *fi;
3032 leaf = path->nodes[0];
3033 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3035 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3036 key.type != BTRFS_EXTENT_CSUM_KEY);
3038 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3041 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3042 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3043 fi = btrfs_item_ptr(leaf, path->slots[0],
3044 struct btrfs_file_extent_item);
3045 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3047 btrfs_release_path(root, path);
3049 path->keep_locks = 1;
3050 path->search_for_split = 1;
3051 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3052 path->search_for_split = 0;
3057 leaf = path->nodes[0];
3058 /* if our item isn't there or got smaller, return now */
3059 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3062 /* the leaf has changed, it now has room. return now */
3063 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3066 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3067 fi = btrfs_item_ptr(leaf, path->slots[0],
3068 struct btrfs_file_extent_item);
3069 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3073 btrfs_set_path_blocking(path);
3074 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3078 path->keep_locks = 0;
3079 btrfs_unlock_up_safe(path, 1);
3082 path->keep_locks = 0;
3086 static noinline int split_item(struct btrfs_trans_handle *trans,
3087 struct btrfs_root *root,
3088 struct btrfs_path *path,
3089 struct btrfs_key *new_key,
3090 unsigned long split_offset)
3092 struct extent_buffer *leaf;
3093 struct btrfs_item *item;
3094 struct btrfs_item *new_item;
3100 struct btrfs_disk_key disk_key;
3102 leaf = path->nodes[0];
3103 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3105 btrfs_set_path_blocking(path);
3107 item = btrfs_item_nr(leaf, path->slots[0]);
3108 orig_offset = btrfs_item_offset(leaf, item);
3109 item_size = btrfs_item_size(leaf, item);
3111 buf = kmalloc(item_size, GFP_NOFS);
3115 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3116 path->slots[0]), item_size);
3118 slot = path->slots[0] + 1;
3119 nritems = btrfs_header_nritems(leaf);
3120 if (slot != nritems) {
3121 /* shift the items */
3122 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3123 btrfs_item_nr_offset(slot),
3124 (nritems - slot) * sizeof(struct btrfs_item));
3127 btrfs_cpu_key_to_disk(&disk_key, new_key);
3128 btrfs_set_item_key(leaf, &disk_key, slot);
3130 new_item = btrfs_item_nr(leaf, slot);
3132 btrfs_set_item_offset(leaf, new_item, orig_offset);
3133 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3135 btrfs_set_item_offset(leaf, item,
3136 orig_offset + item_size - split_offset);
3137 btrfs_set_item_size(leaf, item, split_offset);
3139 btrfs_set_header_nritems(leaf, nritems + 1);
3141 /* write the data for the start of the original item */
3142 write_extent_buffer(leaf, buf,
3143 btrfs_item_ptr_offset(leaf, path->slots[0]),
3146 /* write the data for the new item */
3147 write_extent_buffer(leaf, buf + split_offset,
3148 btrfs_item_ptr_offset(leaf, slot),
3149 item_size - split_offset);
3150 btrfs_mark_buffer_dirty(leaf);
3152 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3158 * This function splits a single item into two items,
3159 * giving 'new_key' to the new item and splitting the
3160 * old one at split_offset (from the start of the item).
3162 * The path may be released by this operation. After
3163 * the split, the path is pointing to the old item. The
3164 * new item is going to be in the same node as the old one.
3166 * Note, the item being split must be small enough to live alone on
3167 * a tree block with room for one extra struct btrfs_item
3169 * This allows us to split the item in place, keeping a lock on the
3170 * leaf the entire time.
3172 int btrfs_split_item(struct btrfs_trans_handle *trans,
3173 struct btrfs_root *root,
3174 struct btrfs_path *path,
3175 struct btrfs_key *new_key,
3176 unsigned long split_offset)
3179 ret = setup_leaf_for_split(trans, root, path,
3180 sizeof(struct btrfs_item));
3184 ret = split_item(trans, root, path, new_key, split_offset);
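/*
 * A minimal illustrative sketch of calling btrfs_split_item(): the path
 * must already point at the item to split, and setup_leaf_for_split()
 * above only accepts file extent and csum items.  The 4096 byte split
 * point, the key adjustment and the example_ name are hypothetical.
 */
static inline int example_split_file_extent(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	struct btrfs_key new_key;

	/* the second half of the item gets its own key */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += 4096;

	/* split 4096 bytes into the original item's data */
	return btrfs_split_item(trans, root, path, &new_key, 4096);
}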
3189 * This function duplicates an item, giving 'new_key' to the new item.
3190 * It guarantees both items live in the same tree leaf and the new item
3191 * is contiguous with the original item.
3193 * This allows us to split a file extent in place, keeping a lock on the
3194 * leaf the entire time.
3196 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3197 struct btrfs_root *root,
3198 struct btrfs_path *path,
3199 struct btrfs_key *new_key)
3201 struct extent_buffer *leaf;
3205 leaf = path->nodes[0];
3206 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3207 ret = setup_leaf_for_split(trans, root, path,
3208 item_size + sizeof(struct btrfs_item));
3213 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3214 item_size, item_size +
3215 sizeof(struct btrfs_item), 1);
3218 leaf = path->nodes[0];
3219 memcpy_extent_buffer(leaf,
3220 btrfs_item_ptr_offset(leaf, path->slots[0]),
3221 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3227 * make the item pointed to by the path smaller. new_size indicates
3228 * how small to make it, and from_end tells us if we just chop bytes
3229 * off the end of the item or if we shift the item to chop bytes off the front.
3232 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3233 struct btrfs_root *root,
3234 struct btrfs_path *path,
3235 u32 new_size, int from_end)
3240 struct extent_buffer *leaf;
3241 struct btrfs_item *item;
3243 unsigned int data_end;
3244 unsigned int old_data_start;
3245 unsigned int old_size;
3246 unsigned int size_diff;
3249 slot_orig = path->slots[0];
3250 leaf = path->nodes[0];
3251 slot = path->slots[0];
3253 old_size = btrfs_item_size_nr(leaf, slot);
3254 if (old_size == new_size)
3257 nritems = btrfs_header_nritems(leaf);
3258 data_end = leaf_data_end(root, leaf);
3260 old_data_start = btrfs_item_offset_nr(leaf, slot);
3262 size_diff = old_size - new_size;
3265 BUG_ON(slot >= nritems);
3268 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3270 /* first correct the data pointers */
3271 for (i = slot; i < nritems; i++) {
3273 item = btrfs_item_nr(leaf, i);
3275 if (!leaf->map_token) {
3276 map_extent_buffer(leaf, (unsigned long)item,
3277 sizeof(struct btrfs_item),
3278 &leaf->map_token, &leaf->kaddr,
3279 &leaf->map_start, &leaf->map_len,
3283 ioff = btrfs_item_offset(leaf, item);
3284 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3287 if (leaf->map_token) {
3288 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3289 leaf->map_token = NULL;
3292 /* shift the data */
3294 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3295 data_end + size_diff, btrfs_leaf_data(leaf) +
3296 data_end, old_data_start + new_size - data_end);
3298 struct btrfs_disk_key disk_key;
3301 btrfs_item_key(leaf, &disk_key, slot);
3303 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3305 struct btrfs_file_extent_item *fi;
3307 fi = btrfs_item_ptr(leaf, slot,
3308 struct btrfs_file_extent_item);
3309 fi = (struct btrfs_file_extent_item *)(
3310 (unsigned long)fi - size_diff);
3312 if (btrfs_file_extent_type(leaf, fi) ==
3313 BTRFS_FILE_EXTENT_INLINE) {
3314 ptr = btrfs_item_ptr_offset(leaf, slot);
3315 memmove_extent_buffer(leaf, ptr,
3317 offsetof(struct btrfs_file_extent_item,
3322 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3323 data_end + size_diff, btrfs_leaf_data(leaf) +
3324 data_end, old_data_start - data_end);
3326 offset = btrfs_disk_key_offset(&disk_key);
3327 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3328 btrfs_set_item_key(leaf, &disk_key, slot);
3330 fixup_low_keys(trans, root, path, &disk_key, 1);
3333 item = btrfs_item_nr(leaf, slot);
3334 btrfs_set_item_size(leaf, item, new_size);
3335 btrfs_mark_buffer_dirty(leaf);
3338 if (btrfs_leaf_free_space(root, leaf) < 0) {
3339 btrfs_print_leaf(root, leaf);
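/*
 * A minimal illustrative sketch of calling btrfs_truncate_item() to drop
 * bytes from the end of the item the path points at.  The example_ name is
 * hypothetical, not part of the btrfs interfaces.
 */
static inline int example_shrink_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path, u32 new_size)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	/* this sketch only shrinks; growing is btrfs_extend_item()'s job */
	if (new_size >= old_size)
		return 0;
	/* from_end == 1: chop bytes off the end of the item's data */
	return btrfs_truncate_item(trans, root, path, new_size, 1);
}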
3346 * make the item pointed to by the path bigger, data_size is the new size.
3348 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3349 struct btrfs_root *root, struct btrfs_path *path,
3355 struct extent_buffer *leaf;
3356 struct btrfs_item *item;
3358 unsigned int data_end;
3359 unsigned int old_data;
3360 unsigned int old_size;
3363 slot_orig = path->slots[0];
3364 leaf = path->nodes[0];
3366 nritems = btrfs_header_nritems(leaf);
3367 data_end = leaf_data_end(root, leaf);
3369 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3370 btrfs_print_leaf(root, leaf);
3373 slot = path->slots[0];
3374 old_data = btrfs_item_end_nr(leaf, slot);
3377 if (slot >= nritems) {
3378 btrfs_print_leaf(root, leaf);
3379 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3385 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3387 /* first correct the data pointers */
3388 for (i = slot; i < nritems; i++) {
3390 item = btrfs_item_nr(leaf, i);
3392 if (!leaf->map_token) {
3393 map_extent_buffer(leaf, (unsigned long)item,
3394 sizeof(struct btrfs_item),
3395 &leaf->map_token, &leaf->kaddr,
3396 &leaf->map_start, &leaf->map_len,
3399 ioff = btrfs_item_offset(leaf, item);
3400 btrfs_set_item_offset(leaf, item, ioff - data_size);
3403 if (leaf->map_token) {
3404 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3405 leaf->map_token = NULL;
3408 /* shift the data */
3409 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3410 data_end - data_size, btrfs_leaf_data(leaf) +
3411 data_end, old_data - data_end);
3413 data_end = old_data;
3414 old_size = btrfs_item_size_nr(leaf, slot);
3415 item = btrfs_item_nr(leaf, slot);
3416 btrfs_set_item_size(leaf, item, old_size + data_size);
3417 btrfs_mark_buffer_dirty(leaf);
3420 if (btrfs_leaf_free_space(root, leaf) < 0) {
3421 btrfs_print_leaf(root, leaf);
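/*
 * A minimal illustrative sketch of calling btrfs_extend_item().  The caller
 * is responsible for having room in the leaf (usually via the ins_len
 * argument of btrfs_search_slot()), since btrfs_extend_item() BUGs when the
 * leaf cannot hold the extra bytes.  The example_ name is hypothetical.
 */
static inline int example_grow_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, u32 grow_by)
{
	if (btrfs_leaf_free_space(root, path->nodes[0]) < (int)grow_by)
		return -ENOSPC;
	return btrfs_extend_item(trans, root, path, grow_by);
}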
3428 * Given a key and some data, insert items into the tree.
3429 * This does all the path init required, making room in the tree if needed.
3430 * Returns the number of keys that were inserted.
3432 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3433 struct btrfs_root *root,
3434 struct btrfs_path *path,
3435 struct btrfs_key *cpu_key, u32 *data_size,
3438 struct extent_buffer *leaf;
3439 struct btrfs_item *item;
3446 unsigned int data_end;
3447 struct btrfs_disk_key disk_key;
3448 struct btrfs_key found_key;
3450 for (i = 0; i < nr; i++) {
3451 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3452 BTRFS_LEAF_DATA_SIZE(root)) {
3456 total_data += data_size[i];
3457 total_size += data_size[i] + sizeof(struct btrfs_item);
3461 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3467 leaf = path->nodes[0];
3469 nritems = btrfs_header_nritems(leaf);
3470 data_end = leaf_data_end(root, leaf);
3472 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3473 for (i = nr; i >= 0; i--) {
3474 total_data -= data_size[i];
3475 total_size -= data_size[i] + sizeof(struct btrfs_item);
3476 if (total_size < btrfs_leaf_free_space(root, leaf))
3482 slot = path->slots[0];
3485 if (slot != nritems) {
3486 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3488 item = btrfs_item_nr(leaf, slot);
3489 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3491 /* figure out how many keys we can insert in here */
3492 total_data = data_size[0];
3493 for (i = 1; i < nr; i++) {
3494 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3496 total_data += data_size[i];
3500 if (old_data < data_end) {
3501 btrfs_print_leaf(root, leaf);
3502 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3503 slot, old_data, data_end);
3507 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3509 /* first correct the data pointers */
3510 WARN_ON(leaf->map_token);
3511 for (i = slot; i < nritems; i++) {
3514 item = btrfs_item_nr(leaf, i);
3515 if (!leaf->map_token) {
3516 map_extent_buffer(leaf, (unsigned long)item,
3517 sizeof(struct btrfs_item),
3518 &leaf->map_token, &leaf->kaddr,
3519 &leaf->map_start, &leaf->map_len,
3523 ioff = btrfs_item_offset(leaf, item);
3524 btrfs_set_item_offset(leaf, item, ioff - total_data);
3526 if (leaf->map_token) {
3527 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3528 leaf->map_token = NULL;
3531 /* shift the items */
3532 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3533 btrfs_item_nr_offset(slot),
3534 (nritems - slot) * sizeof(struct btrfs_item));
3536 /* shift the data */
3537 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3538 data_end - total_data, btrfs_leaf_data(leaf) +
3539 data_end, old_data - data_end);
3540 data_end = old_data;
3543 * this sucks but it has to be done: if we are inserting at
3544 * the end of the leaf, only insert 1 of the items, since we
3545 * have no way of knowing what's on the next leaf and we'd have
3546 * to drop our current locks to figure it out
3551 /* setup the item for the new data */
3552 for (i = 0; i < nr; i++) {
3553 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3554 btrfs_set_item_key(leaf, &disk_key, slot + i);
3555 item = btrfs_item_nr(leaf, slot + i);
3556 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3557 data_end -= data_size[i];
3558 btrfs_set_item_size(leaf, item, data_size[i]);
3560 btrfs_set_header_nritems(leaf, nritems + nr);
3561 btrfs_mark_buffer_dirty(leaf);
3565 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3566 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3569 if (btrfs_leaf_free_space(root, leaf) < 0) {
3570 btrfs_print_leaf(root, leaf);
3580 * this is a helper for btrfs_insert_empty_items, the main goal here is
3581 * to save stack depth by doing the bulk of the work in a function
3582 * that doesn't call btrfs_search_slot
3584 static noinline_for_stack int
3585 setup_items_for_insert(struct btrfs_trans_handle *trans,
3586 struct btrfs_root *root, struct btrfs_path *path,
3587 struct btrfs_key *cpu_key, u32 *data_size,
3588 u32 total_data, u32 total_size, int nr)
3590 struct btrfs_item *item;
3593 unsigned int data_end;
3594 struct btrfs_disk_key disk_key;
3596 struct extent_buffer *leaf;
3599 leaf = path->nodes[0];
3600 slot = path->slots[0];
3602 nritems = btrfs_header_nritems(leaf);
3603 data_end = leaf_data_end(root, leaf);
3605 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3606 btrfs_print_leaf(root, leaf);
3607 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3608 total_size, btrfs_leaf_free_space(root, leaf));
3612 if (slot != nritems) {
3613 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3615 if (old_data < data_end) {
3616 btrfs_print_leaf(root, leaf);
3617 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3618 slot, old_data, data_end);
3622 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3624 /* first correct the data pointers */
3625 WARN_ON(leaf->map_token);
3626 for (i = slot; i < nritems; i++) {
3629 item = btrfs_item_nr(leaf, i);
3630 if (!leaf->map_token) {
3631 map_extent_buffer(leaf, (unsigned long)item,
3632 sizeof(struct btrfs_item),
3633 &leaf->map_token, &leaf->kaddr,
3634 &leaf->map_start, &leaf->map_len,
3638 ioff = btrfs_item_offset(leaf, item);
3639 btrfs_set_item_offset(leaf, item, ioff - total_data);
3641 if (leaf->map_token) {
3642 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3643 leaf->map_token = NULL;
3646 /* shift the items */
3647 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3648 btrfs_item_nr_offset(slot),
3649 (nritems - slot) * sizeof(struct btrfs_item));
3651 /* shift the data */
3652 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3653 data_end - total_data, btrfs_leaf_data(leaf) +
3654 data_end, old_data - data_end);
3655 data_end = old_data;
3658 /* setup the item for the new data */
3659 for (i = 0; i < nr; i++) {
3660 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3661 btrfs_set_item_key(leaf, &disk_key, slot + i);
3662 item = btrfs_item_nr(leaf, slot + i);
3663 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3664 data_end -= data_size[i];
3665 btrfs_set_item_size(leaf, item, data_size[i]);
3668 btrfs_set_header_nritems(leaf, nritems + nr);
3672 struct btrfs_disk_key disk_key;
3673 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3674 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3676 btrfs_unlock_up_safe(path, 1);
3677 btrfs_mark_buffer_dirty(leaf);
3679 if (btrfs_leaf_free_space(root, leaf) < 0) {
3680 btrfs_print_leaf(root, leaf);
3687 * Given a key and some data, insert items into the tree.
3688 * This does all the path init required, making room in the tree if needed.
3690 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3691 struct btrfs_root *root,
3692 struct btrfs_path *path,
3693 struct btrfs_key *cpu_key, u32 *data_size,
3696 struct extent_buffer *leaf;
3703 for (i = 0; i < nr; i++)
3704 total_data += data_size[i];
3706 total_size = total_data + (nr * sizeof(struct btrfs_item));
3707 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3713 leaf = path->nodes[0];
3714 slot = path->slots[0];
3717 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3718 total_data, total_size, nr);
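/*
 * A minimal illustrative sketch of btrfs_insert_empty_items(): reserve room
 * for two consecutive items in a single search and then fill in their data.
 * The sizes, payload and the example_ name are hypothetical; keys[0] and
 * keys[1] must be in ascending key order.
 */
static inline int example_insert_two_items(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   struct btrfs_key *keys)
{
	u32 sizes[2] = { 8, 16 };
	char payload[16] = { 0 };
	struct extent_buffer *leaf;
	int ret;
	int i;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	/* path->slots[0] now points at the first of the new items */
	leaf = path->nodes[0];
	for (i = 0; i < 2; i++)
		write_extent_buffer(leaf, payload,
				    btrfs_item_ptr_offset(leaf,
							  path->slots[0] + i),
				    sizes[i]);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}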
3725 * Given a key and some data, insert an item into the tree.
3726 * This does all the path init required, making room in the tree if needed.
3728 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3729 *root, struct btrfs_key *cpu_key, void *data, u32
3733 struct btrfs_path *path;
3734 struct extent_buffer *leaf;
3737 path = btrfs_alloc_path();
3739 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3741 leaf = path->nodes[0];
3742 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3743 write_extent_buffer(leaf, data, ptr, data_size);
3744 btrfs_mark_buffer_dirty(leaf);
3746 btrfs_free_path(path);
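/*
 * A minimal illustrative sketch of btrfs_insert_item() above, which
 * allocates a path, reserves room and copies the caller's data in one call.
 * The key values, payload and the example_ name are hypothetical.
 */
static inline int example_insert_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root)
{
	struct btrfs_key key;
	char data[8] = "example";

	key.objectid = 256ULL;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, data, sizeof(data));
}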
3751 * delete the pointer from a given node.
3753 * the tree should have been previously balanced so the deletion does not empty a node.
3756 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3757 struct btrfs_path *path, int level, int slot)
3759 struct extent_buffer *parent = path->nodes[level];
3764 nritems = btrfs_header_nritems(parent);
3765 if (slot != nritems - 1) {
3766 memmove_extent_buffer(parent,
3767 btrfs_node_key_ptr_offset(slot),
3768 btrfs_node_key_ptr_offset(slot + 1),
3769 sizeof(struct btrfs_key_ptr) *
3770 (nritems - slot - 1));
3773 btrfs_set_header_nritems(parent, nritems);
3774 if (nritems == 0 && parent == root->node) {
3775 BUG_ON(btrfs_header_level(root->node) != 1);
3776 /* just turn the root into a leaf and break */
3777 btrfs_set_header_level(root->node, 0);
3778 } else if (slot == 0) {
3779 struct btrfs_disk_key disk_key;
3781 btrfs_node_key(parent, &disk_key, 0);
3782 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3786 btrfs_mark_buffer_dirty(parent);
3791 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3794 * This deletes the pointer in path->nodes[1] and frees the leaf
3795 * block extent. zero is returned if it all worked out, < 0 otherwise.
3797 * The path must have already been setup for deleting the leaf, including
3798 * all the proper balancing. path->nodes[1] must be locked.
3800 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3801 struct btrfs_root *root,
3802 struct btrfs_path *path,
3803 struct extent_buffer *leaf)
3807 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3808 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3813 * btrfs_free_extent is expensive, we want to make sure we
3814 * aren't holding any locks when we call it
3816 btrfs_unlock_up_safe(path, 0);
3818 root_sub_used(root, leaf->len);
3820 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3824 * delete the items starting at 'slot' at the leaf level in path. If that empties
3825 * the leaf, remove it from the tree
3827 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3828 struct btrfs_path *path, int slot, int nr)
3830 struct extent_buffer *leaf;
3831 struct btrfs_item *item;
3839 leaf = path->nodes[0];
3840 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3842 for (i = 0; i < nr; i++)
3843 dsize += btrfs_item_size_nr(leaf, slot + i);
3845 nritems = btrfs_header_nritems(leaf);
3847 if (slot + nr != nritems) {
3848 int data_end = leaf_data_end(root, leaf);
3850 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3852 btrfs_leaf_data(leaf) + data_end,
3853 last_off - data_end);
3855 for (i = slot + nr; i < nritems; i++) {
3858 item = btrfs_item_nr(leaf, i);
3859 if (!leaf->map_token) {
3860 map_extent_buffer(leaf, (unsigned long)item,
3861 sizeof(struct btrfs_item),
3862 &leaf->map_token, &leaf->kaddr,
3863 &leaf->map_start, &leaf->map_len,
3866 ioff = btrfs_item_offset(leaf, item);
3867 btrfs_set_item_offset(leaf, item, ioff + dsize);
3870 if (leaf->map_token) {
3871 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3872 leaf->map_token = NULL;
3875 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3876 btrfs_item_nr_offset(slot + nr),
3877 sizeof(struct btrfs_item) *
3878 (nritems - slot - nr));
3880 btrfs_set_header_nritems(leaf, nritems - nr);
3883 /* delete the leaf if we've emptied it */
3885 if (leaf == root->node) {
3886 btrfs_set_header_level(leaf, 0);
3888 btrfs_set_path_blocking(path);
3889 clean_tree_block(trans, root, leaf);
3890 ret = btrfs_del_leaf(trans, root, path, leaf);
3894 int used = leaf_space_used(leaf, 0, nritems);
3896 struct btrfs_disk_key disk_key;
3898 btrfs_item_key(leaf, &disk_key, 0);
3899 wret = fixup_low_keys(trans, root, path,
3905 /* delete the leaf if it is mostly empty */
3906 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3907 /* push_leaf_left fixes the path.
3908 * make sure the path still points to our leaf
3909 * for possible call to del_ptr below
3911 slot = path->slots[1];
3912 extent_buffer_get(leaf);
3914 btrfs_set_path_blocking(path);
3915 wret = push_leaf_left(trans, root, path, 1, 1);
3916 if (wret < 0 && wret != -ENOSPC)
3919 if (path->nodes[0] == leaf &&
3920 btrfs_header_nritems(leaf)) {
3921 wret = push_leaf_right(trans, root, path, 1, 1);
3922 if (wret < 0 && wret != -ENOSPC)
3926 if (btrfs_header_nritems(leaf) == 0) {
3927 path->slots[1] = slot;
3928 ret = btrfs_del_leaf(trans, root, path, leaf);
3930 free_extent_buffer(leaf);
3932 /* if we're still in the path, make sure
3933 * we're dirty. Otherwise, one of the
3934 * push_leaf functions must have already
3935 * dirtied this buffer
3937 if (path->nodes[0] == leaf)
3938 btrfs_mark_buffer_dirty(leaf);
3939 free_extent_buffer(leaf);
3942 btrfs_mark_buffer_dirty(leaf);
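/*
 * A minimal illustrative sketch of the usual delete pattern built on
 * btrfs_del_items(): search with ins_len == -1 so the walk down balances
 * for deletion, then remove the single slot that matched.  The example_
 * name is hypothetical, not part of the btrfs interfaces.
 */
static inline int example_delete_one_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* no exact key match */

	btrfs_free_path(path);
	return ret;
}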
3949 * search the tree again to find a leaf with lesser keys
3950 * returns 0 if it found something or 1 if there are no lesser leaves.
3951 * returns < 0 on io errors.
3953 * This may release the path, and so you may lose any locks held at the time you call it.
3956 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3958 struct btrfs_key key;
3959 struct btrfs_disk_key found_key;
3962 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3966 else if (key.type > 0)
3968 else if (key.objectid > 0)
3973 btrfs_release_path(root, path);
3974 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3977 btrfs_item_key(path->nodes[0], &found_key, 0);
3978 ret = comp_keys(&found_key, &key);
3985 * A helper function to walk down the tree starting at min_key, and looking
3986 * for nodes or leaves that are either in cache or have a minimum
3987 * transaction id. This is used by the btree defrag code, and tree logging
3989 * This does not cow, but it does stuff the starting key it finds back
3990 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3991 * key and get a writable path.
3993 * This does lock as it descends, and path->keep_locks should be set
3994 * to 1 by the caller.
3996 * This honors path->lowest_level to prevent descent past a given level of the tree.
3999 * min_trans indicates the oldest transaction that you are interested
4000 * in walking through. Any nodes or leaves older than min_trans are
4001 * skipped over (without reading them).
4003 * returns zero if something useful was found, < 0 on error and 1 if there
4004 * was nothing in the tree that matched the search criteria.
4006 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4007 struct btrfs_key *max_key,
4008 struct btrfs_path *path, int cache_only,
4011 struct extent_buffer *cur;
4012 struct btrfs_key found_key;
4019 WARN_ON(!path->keep_locks);
4021 cur = btrfs_lock_root_node(root);
4022 level = btrfs_header_level(cur);
4023 WARN_ON(path->nodes[level]);
4024 path->nodes[level] = cur;
4025 path->locks[level] = 1;
4027 if (btrfs_header_generation(cur) < min_trans) {
4032 nritems = btrfs_header_nritems(cur);
4033 level = btrfs_header_level(cur);
4034 sret = bin_search(cur, min_key, level, &slot);
4036 /* at the lowest level, we're done, setup the path and exit */
4037 if (level == path->lowest_level) {
4038 if (slot >= nritems)
4041 path->slots[level] = slot;
4042 btrfs_item_key_to_cpu(cur, &found_key, slot);
4045 if (sret && slot > 0)
4048 * check this node pointer against the cache_only and
4049 * min_trans parameters. If it isn't in cache or is too
4050 * old, skip to the next one.
4052 while (slot < nritems) {
4055 struct extent_buffer *tmp;
4056 struct btrfs_disk_key disk_key;
4058 blockptr = btrfs_node_blockptr(cur, slot);
4059 gen = btrfs_node_ptr_generation(cur, slot);
4060 if (gen < min_trans) {
4068 btrfs_node_key(cur, &disk_key, slot);
4069 if (comp_keys(&disk_key, max_key) >= 0) {
4075 tmp = btrfs_find_tree_block(root, blockptr,
4076 btrfs_level_size(root, level - 1));
4078 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4079 free_extent_buffer(tmp);
4083 free_extent_buffer(tmp);
4088 * we didn't find a candidate key in this node, walk forward
4089 * and find another one
4091 if (slot >= nritems) {
4092 path->slots[level] = slot;
4093 btrfs_set_path_blocking(path);
4094 sret = btrfs_find_next_key(root, path, min_key, level,
4095 cache_only, min_trans);
4097 btrfs_release_path(root, path);
4103 /* save our key for returning back */
4104 btrfs_node_key_to_cpu(cur, &found_key, slot);
4105 path->slots[level] = slot;
4106 if (level == path->lowest_level) {
4108 unlock_up(path, level, 1);
4111 btrfs_set_path_blocking(path);
4112 cur = read_node_slot(root, cur, slot);
4114 btrfs_tree_lock(cur);
4116 path->locks[level - 1] = 1;
4117 path->nodes[level - 1] = cur;
4118 unlock_up(path, level, 1);
4119 btrfs_clear_path_blocking(path, NULL);
4123 memcpy(min_key, &found_key, sizeof(found_key));
4124 btrfs_set_path_blocking(path);
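/*
 * A minimal illustrative sketch of calling btrfs_search_forward().
 * path->keep_locks must be set, as the WARN_ON above enforces; on success
 * min_key is updated to the key that was found.  The unbounded max_key and
 * the example_ name are hypothetical.
 */
static inline int example_search_forward(struct btrfs_root *root,
					 struct btrfs_path *path,
					 struct btrfs_key *min_key,
					 u64 min_trans)
{
	struct btrfs_key max_key;
	int ret;

	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	path->keep_locks = 1;
	ret = btrfs_search_forward(root, min_key, &max_key, path,
				   0, min_trans);
	btrfs_release_path(root, path);
	path->keep_locks = 0;
	return ret;
}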
4129 * this is similar to btrfs_next_leaf, but does not try to preserve
4130 * and fixup the path. It looks for and returns the next key in the
4131 * tree based on the current path and the cache_only and min_trans parameters.
4134 * 0 is returned if another key is found, < 0 if there are any errors
4135 * and 1 is returned if there are no higher keys in the tree
4137 * path->keep_locks should be set to 1 on the search made before
4138 * calling this function.
4140 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4141 struct btrfs_key *key, int level,
4142 int cache_only, u64 min_trans)
4145 struct extent_buffer *c;
4147 WARN_ON(!path->keep_locks);
4148 while (level < BTRFS_MAX_LEVEL) {
4149 if (!path->nodes[level])
4152 slot = path->slots[level] + 1;
4153 c = path->nodes[level];
4155 if (slot >= btrfs_header_nritems(c)) {
4158 struct btrfs_key cur_key;
4159 if (level + 1 >= BTRFS_MAX_LEVEL ||
4160 !path->nodes[level + 1])
4163 if (path->locks[level + 1]) {
4168 slot = btrfs_header_nritems(c) - 1;
4170 btrfs_item_key_to_cpu(c, &cur_key, slot);
4172 btrfs_node_key_to_cpu(c, &cur_key, slot);
4174 orig_lowest = path->lowest_level;
4175 btrfs_release_path(root, path);
4176 path->lowest_level = level;
4177 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4179 path->lowest_level = orig_lowest;
4183 c = path->nodes[level];
4184 slot = path->slots[level];
4191 btrfs_item_key_to_cpu(c, key, slot);
4193 u64 blockptr = btrfs_node_blockptr(c, slot);
4194 u64 gen = btrfs_node_ptr_generation(c, slot);
4197 struct extent_buffer *cur;
4198 cur = btrfs_find_tree_block(root, blockptr,
4199 btrfs_level_size(root, level - 1));
4200 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4203 free_extent_buffer(cur);
4206 free_extent_buffer(cur);
4208 if (gen < min_trans) {
4212 btrfs_node_key_to_cpu(c, key, slot);
4220 * search the tree again to find a leaf with greater keys
4221 * returns 0 if it found something or 1 if there are no greater leaves.
4222 * returns < 0 on io errors.
4224 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4228 struct extent_buffer *c;
4229 struct extent_buffer *next;
4230 struct btrfs_key key;
4233 int old_spinning = path->leave_spinning;
4234 int force_blocking = 0;
4236 nritems = btrfs_header_nritems(path->nodes[0]);
4241 * we take the blocks in an order that upsets lockdep. Using
4242 * blocking mode is the only way around it.
4244 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4248 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4252 btrfs_release_path(root, path);
4254 path->keep_locks = 1;
4256 if (!force_blocking)
4257 path->leave_spinning = 1;
4259 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4260 path->keep_locks = 0;
4265 nritems = btrfs_header_nritems(path->nodes[0]);
4267 * by releasing the path above we dropped all our locks. A balance
4268 * could have added more items next to the key that used to be
4269 * at the very end of the block. So, check again here and
4270 * advance the path if there are now more items available.
4272 if (nritems > 0 && path->slots[0] < nritems - 1) {
4279 while (level < BTRFS_MAX_LEVEL) {
4280 if (!path->nodes[level]) {
4285 slot = path->slots[level] + 1;
4286 c = path->nodes[level];
4287 if (slot >= btrfs_header_nritems(c)) {
4289 if (level == BTRFS_MAX_LEVEL) {
4297 btrfs_tree_unlock(next);
4298 free_extent_buffer(next);
4302 ret = read_block_for_search(NULL, root, path, &next, level,
4308 btrfs_release_path(root, path);
4312 if (!path->skip_locking) {
4313 ret = btrfs_try_spin_lock(next);
4315 btrfs_set_path_blocking(path);
4316 btrfs_tree_lock(next);
4317 if (!force_blocking)
4318 btrfs_clear_path_blocking(path, next);
4321 btrfs_set_lock_blocking(next);
4325 path->slots[level] = slot;
4328 c = path->nodes[level];
4329 if (path->locks[level])
4330 btrfs_tree_unlock(c);
4332 free_extent_buffer(c);
4333 path->nodes[level] = next;
4334 path->slots[level] = 0;
4335 if (!path->skip_locking)
4336 path->locks[level] = 1;
4341 ret = read_block_for_search(NULL, root, path, &next, level,
4347 btrfs_release_path(root, path);
4351 if (!path->skip_locking) {
4352 btrfs_assert_tree_locked(path->nodes[level]);
4353 ret = btrfs_try_spin_lock(next);
4355 btrfs_set_path_blocking(path);
4356 btrfs_tree_lock(next);
4357 if (!force_blocking)
4358 btrfs_clear_path_blocking(path, next);
4361 btrfs_set_lock_blocking(next);
4366 unlock_up(path, 0, 1);
4367 path->leave_spinning = old_spinning;
4369 btrfs_set_path_blocking(path);
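/*
 * A minimal illustrative sketch of the common iteration loop built on
 * btrfs_next_leaf(): visit every item at or after min_key.  The body only
 * counts items; the example_ name is hypothetical, not part of the btrfs
 * interfaces.
 */
static inline int example_count_items(struct btrfs_root *root,
				      struct btrfs_key *min_key)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, min_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;	/* no more leaves */
		}
		/* found now holds the key of the current item */
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
		count++;
		path->slots[0]++;
	}
	ret = count;
out:
	btrfs_free_path(path);
	return ret;
}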
4375 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4376 * searching until it gets past min_objectid or finds an item of 'type'
4378 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4380 int btrfs_previous_item(struct btrfs_root *root,
4381 struct btrfs_path *path, u64 min_objectid,
4384 struct btrfs_key found_key;
4385 struct extent_buffer *leaf;
4390 if (path->slots[0] == 0) {
4391 btrfs_set_path_blocking(path);
4392 ret = btrfs_prev_leaf(root, path);
4398 leaf = path->nodes[0];
4399 nritems = btrfs_header_nritems(leaf);
4402 if (path->slots[0] == nritems)
4405 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4406 if (found_key.objectid < min_objectid)
4408 if (found_key.type == type)
4410 if (found_key.objectid == min_objectid &&
4411 found_key.type < type)