2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
22 #include "transaction.h"
23 #include "print-tree.h"
26 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
27 *root, struct btrfs_path *path, int level);
28 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_key *ins_key,
30 struct btrfs_path *path, int data_size, int extend);
31 static int push_node_left(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, struct extent_buffer *dst,
33 struct extent_buffer *src, int empty);
34 static int balance_node_right(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root,
36 struct extent_buffer *dst_buf,
37 struct extent_buffer *src_buf);
38 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
39 struct btrfs_path *path, int level, int slot);
41 struct btrfs_path *btrfs_alloc_path(void)
43 struct btrfs_path *path;
44 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
51 * set all locked nodes in the path to blocking locks. This should
52 * be done before scheduling
54 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
57 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
58 if (p->nodes[i] && p->locks[i])
59 btrfs_set_lock_blocking(p->nodes[i]);
64 * reset all the locked nodes in the path to spinning locks.
66 * held is used to keep lockdep happy: when lockdep is enabled
67 * we set held to a blocking lock before we go around and
68 * retake all the spinlocks in the path. You can safely use NULL for held.
71 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
72 struct extent_buffer *held)
76 #ifdef CONFIG_DEBUG_LOCK_ALLOC
77 /* lockdep really cares that we take all of these spinlocks
78 * in the right order. If any of the locks in the path are not
79 * currently blocking, it is going to complain. So, make really
80 * really sure by forcing the path to blocking before we clear the blocking state.
84 btrfs_set_lock_blocking(held);
85 btrfs_set_path_blocking(p);
88 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
89 if (p->nodes[i] && p->locks[i])
90 btrfs_clear_lock_blocking(p->nodes[i]);
93 #ifdef CONFIG_DEBUG_LOCK_ALLOC
95 btrfs_clear_lock_blocking(held);
99 /* this also releases the path */
100 void btrfs_free_path(struct btrfs_path *p)
102 btrfs_release_path(NULL, p);
103 kmem_cache_free(btrfs_path_cachep, p);
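/*
 * A minimal usage sketch of the path lifecycle (illustrative only, error
 * handling trimmed; 'trans' and 'key' are assumed to exist in the caller):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		slot = path->slots[0];
 *	}
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() releases any locks and extent buffer references still
 * held by the path before handing the struct back to the slab cache.
 */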
107 * path release drops references on the extent buffers in the path
108 * and it drops any locks held by this path
110 * It is safe to call this on paths that hold no locks or extent buffers.
112 noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
116 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
121 btrfs_tree_unlock(p->nodes[i]);
124 free_extent_buffer(p->nodes[i]);
130 * safely gets a reference on the root node of a tree. A lock
131 * is not taken, so a concurrent writer may put a different node
132 * at the root of the tree. See btrfs_lock_root_node for the looping required.
135 * The extent buffer returned by this has a reference taken, so
136 * it won't disappear. It may stop being the root of the tree
137 * at any time because there are no locks held.
139 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
141 struct extent_buffer *eb;
142 spin_lock(&root->node_lock);
144 extent_buffer_get(eb);
145 spin_unlock(&root->node_lock);
149 /* loop around taking references on and locking the root node of the
150 * tree until you end up with a lock on the root. A locked buffer
151 * is returned, with a reference held.
153 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
155 struct extent_buffer *eb;
158 eb = btrfs_root_node(root);
161 spin_lock(&root->node_lock);
162 if (eb == root->node) {
163 spin_unlock(&root->node_lock);
166 spin_unlock(&root->node_lock);
168 btrfs_tree_unlock(eb);
169 free_extent_buffer(eb);
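	/*
	 * The root may have been replaced by a concurrent writer between
	 * btrfs_root_node() and taking the tree lock, so if eb no longer
	 * matches root->node we unlock it, drop our reference and retry
	 * with the new root.
	 */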
174 /* cowonly roots (everything that is not a reference counted cow subvolume)
175 * just get put onto a simple dirty list. transaction.c walks this list to
176 * make sure they get properly updated on disk.
178 static void add_root_to_dirty_list(struct btrfs_root *root)
180 if (root->track_dirty && list_empty(&root->dirty_list)) {
181 list_add(&root->dirty_list,
182 &root->fs_info->dirty_cowonly_roots);
187 * used by snapshot creation to make a copy of a root for a tree with
188 * a given objectid. The buffer with the new root node is returned in
189 * cow_ret, and this func returns zero on success or a negative error code.
191 int btrfs_copy_root(struct btrfs_trans_handle *trans,
192 struct btrfs_root *root,
193 struct extent_buffer *buf,
194 struct extent_buffer **cow_ret, u64 new_root_objectid)
196 struct extent_buffer *cow;
200 struct btrfs_root *new_root;
202 new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
206 memcpy(new_root, root, sizeof(*new_root));
207 new_root->root_key.objectid = new_root_objectid;
209 WARN_ON(root->ref_cows && trans->transid !=
210 root->fs_info->running_transaction->transid);
211 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
213 level = btrfs_header_level(buf);
214 nritems = btrfs_header_nritems(buf);
216 cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0,
217 new_root_objectid, trans->transid,
218 level, buf->start, 0);
224 copy_extent_buffer(cow, buf, 0, 0, cow->len);
225 btrfs_set_header_bytenr(cow, cow->start);
226 btrfs_set_header_generation(cow, trans->transid);
227 btrfs_set_header_owner(cow, new_root_objectid);
228 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
230 write_extent_buffer(cow, root->fs_info->fsid,
231 (unsigned long)btrfs_header_fsid(cow),
234 WARN_ON(btrfs_header_generation(buf) > trans->transid);
235 ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL);
241 btrfs_mark_buffer_dirty(cow);
247 * does the dirty work in cow of a single block. The parent block (if
248 * supplied) is updated to point to the new cow copy. The new buffer is marked
249 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
252 * search_start -- an allocation hint for the new block
254 * empty_size -- a hint that you plan on doing more cow. This is the size in
255 * bytes the allocator should try to find free next to the block it returns.
256 * This is just a hint and may be ignored by the allocator.
258 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
259 struct btrfs_root *root,
260 struct extent_buffer *buf,
261 struct extent_buffer *parent, int parent_slot,
262 struct extent_buffer **cow_ret,
263 u64 search_start, u64 empty_size)
266 struct extent_buffer *cow;
275 btrfs_assert_tree_locked(buf);
278 parent_start = parent->start;
282 WARN_ON(root->ref_cows && trans->transid !=
283 root->fs_info->running_transaction->transid);
284 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
286 level = btrfs_header_level(buf);
287 nritems = btrfs_header_nritems(buf);
289 cow = btrfs_alloc_free_block(trans, root, buf->len,
290 parent_start, root->root_key.objectid,
291 trans->transid, level,
292 search_start, empty_size);
296 /* cow is set to blocking by btrfs_init_new_buffer */
298 copy_extent_buffer(cow, buf, 0, 0, cow->len);
299 btrfs_set_header_bytenr(cow, cow->start);
300 btrfs_set_header_generation(cow, trans->transid);
301 btrfs_set_header_owner(cow, root->root_key.objectid);
302 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
304 write_extent_buffer(cow, root->fs_info->fsid,
305 (unsigned long)btrfs_header_fsid(cow),
308 WARN_ON(btrfs_header_generation(buf) > trans->transid);
309 if (btrfs_header_generation(buf) != trans->transid) {
311 ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents);
315 ret = btrfs_cache_ref(trans, root, buf, nr_extents);
317 } else if (btrfs_header_owner(buf) == BTRFS_TREE_RELOC_OBJECTID) {
319 * There are only two places that can drop a reference to
320 * tree blocks owned by living reloc trees: one is here,
321 * the other is btrfs_drop_subtree. In both places,
322 * we check the reference count while the tree block is locked.
323 * Furthermore, if the reference count is one, it won't get
324 * increased by someone else.
327 ret = btrfs_lookup_extent_ref(trans, root, buf->start,
331 ret = btrfs_update_ref(trans, root, buf, cow,
333 clean_tree_block(trans, root, buf);
335 ret = btrfs_inc_ref(trans, root, buf, cow, NULL);
339 ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems);
342 clean_tree_block(trans, root, buf);
345 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
346 ret = btrfs_reloc_tree_cache_ref(trans, root, cow, buf->start);
350 if (buf == root->node) {
351 WARN_ON(parent && parent != buf);
353 spin_lock(&root->node_lock);
355 extent_buffer_get(cow);
356 spin_unlock(&root->node_lock);
358 if (buf != root->commit_root) {
359 btrfs_free_extent(trans, root, buf->start,
360 buf->len, buf->start,
361 root->root_key.objectid,
362 btrfs_header_generation(buf),
365 free_extent_buffer(buf);
366 add_root_to_dirty_list(root);
368 btrfs_set_node_blockptr(parent, parent_slot,
370 WARN_ON(trans->transid == 0);
371 btrfs_set_node_ptr_generation(parent, parent_slot,
373 btrfs_mark_buffer_dirty(parent);
374 WARN_ON(btrfs_header_generation(parent) != trans->transid);
375 btrfs_free_extent(trans, root, buf->start, buf->len,
376 parent_start, btrfs_header_owner(parent),
377 btrfs_header_generation(parent), level, 1);
380 btrfs_tree_unlock(buf);
381 free_extent_buffer(buf);
382 btrfs_mark_buffer_dirty(cow);
388 * cows a single block, see __btrfs_cow_block for the real work.
389 * This version of it has extra checks so that a block isn't cow'd more than
390 * once per transaction, as long as it hasn't been written yet
392 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
393 struct btrfs_root *root, struct extent_buffer *buf,
394 struct extent_buffer *parent, int parent_slot,
395 struct extent_buffer **cow_ret)
400 if (trans->transaction != root->fs_info->running_transaction) {
401 printk(KERN_CRIT "trans %llu running %llu\n",
402 (unsigned long long)trans->transid,
404 root->fs_info->running_transaction->transid);
407 if (trans->transid != root->fs_info->generation) {
408 printk(KERN_CRIT "trans %llu running %llu\n",
409 (unsigned long long)trans->transid,
410 (unsigned long long)root->fs_info->generation);
414 if (btrfs_header_generation(buf) == trans->transid &&
415 btrfs_header_owner(buf) == root->root_key.objectid &&
416 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
421 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
424 btrfs_set_lock_blocking(parent);
425 btrfs_set_lock_blocking(buf);
427 ret = __btrfs_cow_block(trans, root, buf, parent,
428 parent_slot, cow_ret, search_start, 0);
433 * helper function for defrag to decide if two blocks pointed to by a
434 * node are actually close by
436 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
438 if (blocknr < other && other - (blocknr + blocksize) < 32768)
440 if (blocknr > other && blocknr - (other + blocksize) < 32768)
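/*
 * An illustrative example with hypothetical numbers: for a 4K blocksize,
 * blocknr = 0x20000 and other = 0x26000 leave a gap of
 * 0x26000 - (0x20000 + 0x1000) = 0x5000 = 20480 bytes, which is below the
 * 32768 byte threshold, so the two blocks count as close.
 */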
446 * compare two keys in a memcmp fashion
448 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
452 btrfs_disk_key_to_cpu(&k1, disk);
454 if (k1.objectid > k2->objectid)
456 if (k1.objectid < k2->objectid)
458 if (k1.type > k2->type)
460 if (k1.type < k2->type)
462 if (k1.offset > k2->offset)
464 if (k1.offset < k2->offset)
470 * same as comp_keys only with two btrfs_key's
472 static int comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
474 if (k1->objectid > k2->objectid)
476 if (k1->objectid < k2->objectid)
478 if (k1->type > k2->type)
480 if (k1->type < k2->type)
482 if (k1->offset > k2->offset)
484 if (k1->offset < k2->offset)
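/*
 * Keys compare by (objectid, type, offset), in that order of precedence.
 * An illustrative sort order using made-up values:
 *
 *	{ .objectid = 256, .type = 1, .offset = 0    }
 *	{ .objectid = 256, .type = 1, .offset = 4096 }
 *	{ .objectid = 256, .type = 2, .offset = 0    }
 *	{ .objectid = 257, .type = 0, .offset = 0    }
 *
 * Both comparison helpers return -1, 0 or 1, exactly like memcmp on that
 * tuple.
 */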
490 * this is used by the defrag code to go through all the
491 * leaves pointed to by a node and reallocate them so that
492 * disk order is close to key order
494 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
495 struct btrfs_root *root, struct extent_buffer *parent,
496 int start_slot, int cache_only, u64 *last_ret,
497 struct btrfs_key *progress)
499 struct extent_buffer *cur;
502 u64 search_start = *last_ret;
512 int progress_passed = 0;
513 struct btrfs_disk_key disk_key;
515 parent_level = btrfs_header_level(parent);
516 if (cache_only && parent_level != 1)
519 if (trans->transaction != root->fs_info->running_transaction)
521 if (trans->transid != root->fs_info->generation)
524 parent_nritems = btrfs_header_nritems(parent);
525 blocksize = btrfs_level_size(root, parent_level - 1);
526 end_slot = parent_nritems;
528 if (parent_nritems == 1)
531 btrfs_set_lock_blocking(parent);
533 for (i = start_slot; i < end_slot; i++) {
536 if (!parent->map_token) {
537 map_extent_buffer(parent,
538 btrfs_node_key_ptr_offset(i),
539 sizeof(struct btrfs_key_ptr),
540 &parent->map_token, &parent->kaddr,
541 &parent->map_start, &parent->map_len,
544 btrfs_node_key(parent, &disk_key, i);
545 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
549 blocknr = btrfs_node_blockptr(parent, i);
550 gen = btrfs_node_ptr_generation(parent, i);
552 last_block = blocknr;
555 other = btrfs_node_blockptr(parent, i - 1);
556 close = close_blocks(blocknr, other, blocksize);
558 if (!close && i < end_slot - 2) {
559 other = btrfs_node_blockptr(parent, i + 1);
560 close = close_blocks(blocknr, other, blocksize);
563 last_block = blocknr;
566 if (parent->map_token) {
567 unmap_extent_buffer(parent, parent->map_token,
569 parent->map_token = NULL;
572 cur = btrfs_find_tree_block(root, blocknr, blocksize);
574 uptodate = btrfs_buffer_uptodate(cur, gen);
577 if (!cur || !uptodate) {
579 free_extent_buffer(cur);
583 cur = read_tree_block(root, blocknr,
585 } else if (!uptodate) {
586 btrfs_read_buffer(cur, gen);
589 if (search_start == 0)
590 search_start = last_block;
592 btrfs_tree_lock(cur);
593 btrfs_set_lock_blocking(cur);
594 err = __btrfs_cow_block(trans, root, cur, parent, i,
597 (end_slot - i) * blocksize));
599 btrfs_tree_unlock(cur);
600 free_extent_buffer(cur);
603 search_start = cur->start;
604 last_block = cur->start;
605 *last_ret = search_start;
606 btrfs_tree_unlock(cur);
607 free_extent_buffer(cur);
609 if (parent->map_token) {
610 unmap_extent_buffer(parent, parent->map_token,
612 parent->map_token = NULL;
618 * The leaf data grows from end-to-front in the node.
619 * This returns the offset of the start of the last item,
620 * which is where the leaf data stack stops.
622 static inline unsigned int leaf_data_end(struct btrfs_root *root,
623 struct extent_buffer *leaf)
625 u32 nr = btrfs_header_nritems(leaf);
627 return BTRFS_LEAF_DATA_SIZE(root);
628 return btrfs_item_offset_nr(leaf, nr - 1);
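/*
 * A rough sketch of the leaf layout this helper relies on:
 *
 *	[ header | item 0 | item 1 | ... | free space | ... | data 1 | data 0 ]
 *	           item array grows ->                   <- item data grows
 *
 * So the "data end" is the offset of the most recently added item's data,
 * btrfs_item_offset_nr(leaf, nr - 1), or the full leaf data size when the
 * leaf holds no items at all.
 */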
632 * extra debugging checks to make sure all the items in a node are
633 * well formed and in the proper order
635 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
638 struct extent_buffer *parent = NULL;
639 struct extent_buffer *node = path->nodes[level];
640 struct btrfs_disk_key parent_key;
641 struct btrfs_disk_key node_key;
644 struct btrfs_key cpukey;
645 u32 nritems = btrfs_header_nritems(node);
647 if (path->nodes[level + 1])
648 parent = path->nodes[level + 1];
650 slot = path->slots[level];
651 BUG_ON(nritems == 0);
653 parent_slot = path->slots[level + 1];
654 btrfs_node_key(parent, &parent_key, parent_slot);
655 btrfs_node_key(node, &node_key, 0);
656 BUG_ON(memcmp(&parent_key, &node_key,
657 sizeof(struct btrfs_disk_key)));
658 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
659 btrfs_header_bytenr(node));
661 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
663 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
664 btrfs_node_key(node, &node_key, slot);
665 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
667 if (slot < nritems - 1) {
668 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
669 btrfs_node_key(node, &node_key, slot);
670 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
676 * extra checking to make sure all the items in a leaf are
677 * well formed and in the proper order
679 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
682 struct extent_buffer *leaf = path->nodes[level];
683 struct extent_buffer *parent = NULL;
685 struct btrfs_key cpukey;
686 struct btrfs_disk_key parent_key;
687 struct btrfs_disk_key leaf_key;
688 int slot = path->slots[0];
690 u32 nritems = btrfs_header_nritems(leaf);
692 if (path->nodes[level + 1])
693 parent = path->nodes[level + 1];
699 parent_slot = path->slots[level + 1];
700 btrfs_node_key(parent, &parent_key, parent_slot);
701 btrfs_item_key(leaf, &leaf_key, 0);
703 BUG_ON(memcmp(&parent_key, &leaf_key,
704 sizeof(struct btrfs_disk_key)));
705 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
706 btrfs_header_bytenr(leaf));
708 if (slot != 0 && slot < nritems - 1) {
709 btrfs_item_key(leaf, &leaf_key, slot);
710 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
711 if (comp_keys(&leaf_key, &cpukey) <= 0) {
712 btrfs_print_leaf(root, leaf);
713 printk(KERN_CRIT "slot %d offset bad key\n", slot);
716 if (btrfs_item_offset_nr(leaf, slot - 1) !=
717 btrfs_item_end_nr(leaf, slot)) {
718 btrfs_print_leaf(root, leaf);
719 printk(KERN_CRIT "slot %d offset bad\n", slot);
723 if (slot < nritems - 1) {
724 btrfs_item_key(leaf, &leaf_key, slot);
725 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
726 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
727 if (btrfs_item_offset_nr(leaf, slot) !=
728 btrfs_item_end_nr(leaf, slot + 1)) {
729 btrfs_print_leaf(root, leaf);
730 printk(KERN_CRIT "slot %d offset bad\n", slot);
734 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
735 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
739 static noinline int check_block(struct btrfs_root *root,
740 struct btrfs_path *path, int level)
744 return check_leaf(root, path, level);
745 return check_node(root, path, level);
749 * search for key in the extent_buffer. The items start at offset p,
750 * and they are item_size apart. There are 'max' items in p.
752 * the slot in the array is returned via slot, and it points to
753 * the place where you would insert key if it is not found in the array.
756 * slot may point to max if the key is bigger than all of the keys currently in the array
758 static noinline int generic_bin_search(struct extent_buffer *eb,
760 int item_size, struct btrfs_key *key,
767 struct btrfs_disk_key *tmp = NULL;
768 struct btrfs_disk_key unaligned;
769 unsigned long offset;
770 char *map_token = NULL;
772 unsigned long map_start = 0;
773 unsigned long map_len = 0;
777 mid = (low + high) / 2;
778 offset = p + mid * item_size;
780 if (!map_token || offset < map_start ||
781 (offset + sizeof(struct btrfs_disk_key)) >
782 map_start + map_len) {
784 unmap_extent_buffer(eb, map_token, KM_USER0);
788 err = map_private_extent_buffer(eb, offset,
789 sizeof(struct btrfs_disk_key),
791 &map_start, &map_len, KM_USER0);
794 tmp = (struct btrfs_disk_key *)(kaddr + offset -
797 read_extent_buffer(eb, &unaligned,
798 offset, sizeof(unaligned));
803 tmp = (struct btrfs_disk_key *)(kaddr + offset -
806 ret = comp_keys(tmp, key);
815 unmap_extent_buffer(eb, map_token, KM_USER0);
821 unmap_extent_buffer(eb, map_token, KM_USER0);
826 * simple bin_search frontend that does the right thing for
829 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
830 int level, int *slot)
833 return generic_bin_search(eb,
834 offsetof(struct btrfs_leaf, items),
835 sizeof(struct btrfs_item),
836 key, btrfs_header_nritems(eb),
839 return generic_bin_search(eb,
840 offsetof(struct btrfs_node, ptrs),
841 sizeof(struct btrfs_key_ptr),
842 key, btrfs_header_nritems(eb),
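/*
 * Illustrative result semantics with hypothetical keys: if a node holds the
 * keys 10, 20, 30 then searching for 20 returns 0 with *slot = 1, searching
 * for 25 returns 1 with *slot = 2 (the insert position), and searching for
 * 40 returns 1 with *slot = 3, i.e. equal to the number of items.
 */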
848 /* given a node and slot number, this reads the block it points to. The
849 * extent buffer is returned with a reference taken (but unlocked).
850 * NULL is returned on error.
852 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
853 struct extent_buffer *parent, int slot)
855 int level = btrfs_header_level(parent);
858 if (slot >= btrfs_header_nritems(parent))
863 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
864 btrfs_level_size(root, level - 1),
865 btrfs_node_ptr_generation(parent, slot));
869 * node level balancing, used to make sure nodes are in proper order for
870 * item deletion. We balance from the top down, so we have to make sure
871 * that a deletion won't leave a node completely empty later on.
873 static noinline int balance_level(struct btrfs_trans_handle *trans,
874 struct btrfs_root *root,
875 struct btrfs_path *path, int level)
877 struct extent_buffer *right = NULL;
878 struct extent_buffer *mid;
879 struct extent_buffer *left = NULL;
880 struct extent_buffer *parent = NULL;
884 int orig_slot = path->slots[level];
885 int err_on_enospc = 0;
891 mid = path->nodes[level];
893 WARN_ON(!path->locks[level]);
894 WARN_ON(btrfs_header_generation(mid) != trans->transid);
896 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
898 if (level < BTRFS_MAX_LEVEL - 1)
899 parent = path->nodes[level + 1];
900 pslot = path->slots[level + 1];
903 * deal with the case where there is only one pointer in the root
904 * by promoting the node below to a root
907 struct extent_buffer *child;
909 if (btrfs_header_nritems(mid) != 1)
912 /* promote the child to a root */
913 child = read_node_slot(root, mid, 0);
915 btrfs_tree_lock(child);
916 btrfs_set_lock_blocking(child);
917 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
920 spin_lock(&root->node_lock);
922 spin_unlock(&root->node_lock);
924 ret = btrfs_update_extent_ref(trans, root, child->start,
926 mid->start, child->start,
927 root->root_key.objectid,
928 trans->transid, level - 1);
931 add_root_to_dirty_list(root);
932 btrfs_tree_unlock(child);
934 path->locks[level] = 0;
935 path->nodes[level] = NULL;
936 clean_tree_block(trans, root, mid);
937 btrfs_tree_unlock(mid);
938 /* once for the path */
939 free_extent_buffer(mid);
940 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
941 mid->start, root->root_key.objectid,
942 btrfs_header_generation(mid),
944 /* once for the root ptr */
945 free_extent_buffer(mid);
948 if (btrfs_header_nritems(mid) >
949 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
952 if (btrfs_header_nritems(mid) < 2)
955 left = read_node_slot(root, parent, pslot - 1);
957 btrfs_tree_lock(left);
958 btrfs_set_lock_blocking(left);
959 wret = btrfs_cow_block(trans, root, left,
960 parent, pslot - 1, &left);
966 right = read_node_slot(root, parent, pslot + 1);
968 btrfs_tree_lock(right);
969 btrfs_set_lock_blocking(right);
970 wret = btrfs_cow_block(trans, root, right,
971 parent, pslot + 1, &right);
978 /* first, try to make some room in the middle buffer */
980 orig_slot += btrfs_header_nritems(left);
981 wret = push_node_left(trans, root, left, mid, 1);
984 if (btrfs_header_nritems(mid) < 2)
989 * then try to empty the right most buffer into the middle
992 wret = push_node_left(trans, root, mid, right, 1);
993 if (wret < 0 && wret != -ENOSPC)
995 if (btrfs_header_nritems(right) == 0) {
996 u64 bytenr = right->start;
997 u64 generation = btrfs_header_generation(parent);
998 u32 blocksize = right->len;
1000 clean_tree_block(trans, root, right);
1001 btrfs_tree_unlock(right);
1002 free_extent_buffer(right);
1004 wret = del_ptr(trans, root, path, level + 1, pslot +
1008 wret = btrfs_free_extent(trans, root, bytenr,
1009 blocksize, parent->start,
1010 btrfs_header_owner(parent),
1011 generation, level, 1);
1015 struct btrfs_disk_key right_key;
1016 btrfs_node_key(right, &right_key, 0);
1017 btrfs_set_node_key(parent, &right_key, pslot + 1);
1018 btrfs_mark_buffer_dirty(parent);
1021 if (btrfs_header_nritems(mid) == 1) {
1023 * we're not allowed to leave a node with one item in the
1024 * tree during a delete. A deletion from lower in the tree
1025 * could try to delete the only pointer in this node.
1026 * So, pull some keys from the left.
1027 * There has to be a left pointer at this point because
1028 * otherwise we would have pulled some pointers from the right.
1032 wret = balance_node_right(trans, root, mid, left);
1038 wret = push_node_left(trans, root, left, mid, 1);
1044 if (btrfs_header_nritems(mid) == 0) {
1045 /* we've managed to empty the middle node, drop it */
1046 u64 root_gen = btrfs_header_generation(parent);
1047 u64 bytenr = mid->start;
1048 u32 blocksize = mid->len;
1050 clean_tree_block(trans, root, mid);
1051 btrfs_tree_unlock(mid);
1052 free_extent_buffer(mid);
1054 wret = del_ptr(trans, root, path, level + 1, pslot);
1057 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
1059 btrfs_header_owner(parent),
1060 root_gen, level, 1);
1064 /* update the parent key to reflect our changes */
1065 struct btrfs_disk_key mid_key;
1066 btrfs_node_key(mid, &mid_key, 0);
1067 btrfs_set_node_key(parent, &mid_key, pslot);
1068 btrfs_mark_buffer_dirty(parent);
1071 /* update the path */
1073 if (btrfs_header_nritems(left) > orig_slot) {
1074 extent_buffer_get(left);
1075 /* left was locked after cow */
1076 path->nodes[level] = left;
1077 path->slots[level + 1] -= 1;
1078 path->slots[level] = orig_slot;
1080 btrfs_tree_unlock(mid);
1081 free_extent_buffer(mid);
1084 orig_slot -= btrfs_header_nritems(left);
1085 path->slots[level] = orig_slot;
1088 /* double check we haven't messed things up */
1089 check_block(root, path, level);
1091 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1095 btrfs_tree_unlock(right);
1096 free_extent_buffer(right);
1099 if (path->nodes[level] != left)
1100 btrfs_tree_unlock(left);
1101 free_extent_buffer(left);
1106 /* Node balancing for insertion. Here we only split or push nodes around
1107 * when they are completely full. This is also done top down, so we
1108 * have to be pessimistic.
1110 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1111 struct btrfs_root *root,
1112 struct btrfs_path *path, int level)
1114 struct extent_buffer *right = NULL;
1115 struct extent_buffer *mid;
1116 struct extent_buffer *left = NULL;
1117 struct extent_buffer *parent = NULL;
1121 int orig_slot = path->slots[level];
1127 mid = path->nodes[level];
1128 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1129 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1131 if (level < BTRFS_MAX_LEVEL - 1)
1132 parent = path->nodes[level + 1];
1133 pslot = path->slots[level + 1];
1138 left = read_node_slot(root, parent, pslot - 1);
1140 /* first, try to make some room in the middle buffer */
1144 btrfs_tree_lock(left);
1145 btrfs_set_lock_blocking(left);
1147 left_nr = btrfs_header_nritems(left);
1148 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1151 ret = btrfs_cow_block(trans, root, left, parent,
1156 wret = push_node_left(trans, root,
1163 struct btrfs_disk_key disk_key;
1164 orig_slot += left_nr;
1165 btrfs_node_key(mid, &disk_key, 0);
1166 btrfs_set_node_key(parent, &disk_key, pslot);
1167 btrfs_mark_buffer_dirty(parent);
1168 if (btrfs_header_nritems(left) > orig_slot) {
1169 path->nodes[level] = left;
1170 path->slots[level + 1] -= 1;
1171 path->slots[level] = orig_slot;
1172 btrfs_tree_unlock(mid);
1173 free_extent_buffer(mid);
1176 btrfs_header_nritems(left);
1177 path->slots[level] = orig_slot;
1178 btrfs_tree_unlock(left);
1179 free_extent_buffer(left);
1183 btrfs_tree_unlock(left);
1184 free_extent_buffer(left);
1186 right = read_node_slot(root, parent, pslot + 1);
1189 * then try to empty the right most buffer into the middle
1194 btrfs_tree_lock(right);
1195 btrfs_set_lock_blocking(right);
1197 right_nr = btrfs_header_nritems(right);
1198 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1201 ret = btrfs_cow_block(trans, root, right,
1207 wret = balance_node_right(trans, root,
1214 struct btrfs_disk_key disk_key;
1216 btrfs_node_key(right, &disk_key, 0);
1217 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1218 btrfs_mark_buffer_dirty(parent);
1220 if (btrfs_header_nritems(mid) <= orig_slot) {
1221 path->nodes[level] = right;
1222 path->slots[level + 1] += 1;
1223 path->slots[level] = orig_slot -
1224 btrfs_header_nritems(mid);
1225 btrfs_tree_unlock(mid);
1226 free_extent_buffer(mid);
1228 btrfs_tree_unlock(right);
1229 free_extent_buffer(right);
1233 btrfs_tree_unlock(right);
1234 free_extent_buffer(right);
1240 * readahead one full node of leaves, finding things that are close
1241 * to the block in 'slot', and triggering readahead on them.
1243 static noinline void reada_for_search(struct btrfs_root *root,
1244 struct btrfs_path *path,
1245 int level, int slot, u64 objectid)
1247 struct extent_buffer *node;
1248 struct btrfs_disk_key disk_key;
1253 int direction = path->reada;
1254 struct extent_buffer *eb;
1262 if (!path->nodes[level])
1265 node = path->nodes[level];
1267 search = btrfs_node_blockptr(node, slot);
1268 blocksize = btrfs_level_size(root, level - 1);
1269 eb = btrfs_find_tree_block(root, search, blocksize);
1271 free_extent_buffer(eb);
1277 nritems = btrfs_header_nritems(node);
1280 if (direction < 0) {
1284 } else if (direction > 0) {
1289 if (path->reada < 0 && objectid) {
1290 btrfs_node_key(node, &disk_key, nr);
1291 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1294 search = btrfs_node_blockptr(node, nr);
1295 if ((search <= target && target - search <= 65536) ||
1296 (search > target && search - target <= 65536)) {
1297 readahead_tree_block(root, search, blocksize,
1298 btrfs_node_ptr_generation(node, nr));
1302 if ((nread > 65536 || nscan > 32))
1308 * returns -EAGAIN if it had to drop the path, or zero if everything was in cache
1311 static noinline int reada_for_balance(struct btrfs_root *root,
1312 struct btrfs_path *path, int level)
1316 struct extent_buffer *parent;
1317 struct extent_buffer *eb;
1324 parent = path->nodes[level - 1];
1328 nritems = btrfs_header_nritems(parent);
1329 slot = path->slots[level];
1330 blocksize = btrfs_level_size(root, level);
1333 block1 = btrfs_node_blockptr(parent, slot - 1);
1334 gen = btrfs_node_ptr_generation(parent, slot - 1);
1335 eb = btrfs_find_tree_block(root, block1, blocksize);
1336 if (eb && btrfs_buffer_uptodate(eb, gen))
1338 free_extent_buffer(eb);
1340 if (slot < nritems) {
1341 block2 = btrfs_node_blockptr(parent, slot + 1);
1342 gen = btrfs_node_ptr_generation(parent, slot + 1);
1343 eb = btrfs_find_tree_block(root, block2, blocksize);
1344 if (eb && btrfs_buffer_uptodate(eb, gen))
1346 free_extent_buffer(eb);
1348 if (block1 || block2) {
1350 btrfs_release_path(root, path);
1352 readahead_tree_block(root, block1, blocksize, 0);
1354 readahead_tree_block(root, block2, blocksize, 0);
1357 eb = read_tree_block(root, block1, blocksize, 0);
1358 free_extent_buffer(eb);
1361 eb = read_tree_block(root, block2, blocksize, 0);
1362 free_extent_buffer(eb);
1370 * when we walk down the tree, it is usually safe to unlock the higher layers
1371 * in the tree. The exceptions are when our path goes through slot 0, because
1372 * operations on the tree might require changing key pointers higher up in the tree.
1375 * callers might also have set path->keep_locks, which tells this code to keep
1376 * the lock if the path points to the last slot in the block. This is part of
1377 * walking through the tree, and selecting the next slot in the higher block.
1379 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1380 * if lowest_unlock is 1, level 0 won't be unlocked
1382 static noinline void unlock_up(struct btrfs_path *path, int level,
1386 int skip_level = level;
1388 struct extent_buffer *t;
1390 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1391 if (!path->nodes[i])
1393 if (!path->locks[i])
1395 if (!no_skips && path->slots[i] == 0) {
1399 if (!no_skips && path->keep_locks) {
1402 nritems = btrfs_header_nritems(t);
1403 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1408 if (skip_level < i && i >= lowest_unlock)
1412 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1413 btrfs_tree_unlock(t);
1420 * This releases any locks held in the path starting at level and
1421 * going all the way up to the root.
1423 * btrfs_search_slot will keep the lock held on higher nodes in a few
1424 * corner cases, such as COW of the block at slot zero in the node. This
1425 * ignores those rules, and it should only be called when there are no
1426 * more updates to be done higher up in the tree.
1428 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1432 if (path->keep_locks || path->lowest_level)
1435 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1436 if (!path->nodes[i])
1438 if (!path->locks[i])
1440 btrfs_tree_unlock(path->nodes[i]);
1446 * look for key in the tree. path is filled in with nodes along the way
1447 * if key is found, we return zero and you can find the item in the leaf
1448 * level of the path (level 0)
1450 * If the key isn't found, the path points to the slot where it should
1451 * be inserted, and 1 is returned. If there are other errors during the
1452 * search a negative error number is returned.
1454 * if ins_len > 0, nodes and leaves will be split as we walk down the
1455 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible)
1458 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1459 *root, struct btrfs_key *key, struct btrfs_path *p, int
1462 struct extent_buffer *b;
1463 struct extent_buffer *tmp;
1467 int should_reada = p->reada;
1468 int lowest_unlock = 1;
1470 u8 lowest_level = 0;
1474 lowest_level = p->lowest_level;
1475 WARN_ON(lowest_level && ins_len > 0);
1476 WARN_ON(p->nodes[0] != NULL);
1482 if (p->skip_locking)
1483 b = btrfs_root_node(root);
1485 b = btrfs_lock_root_node(root);
1488 level = btrfs_header_level(b);
1491 * setup the path here so we can release it under lock
1492 * contention with the cow code
1494 p->nodes[level] = b;
1495 if (!p->skip_locking)
1496 p->locks[level] = 1;
1501 /* can the cow on this block be skipped? */
1502 if (btrfs_header_generation(b) == trans->transid &&
1503 btrfs_header_owner(b) == root->root_key.objectid &&
1504 !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) {
1507 btrfs_set_path_blocking(p);
1509 wret = btrfs_cow_block(trans, root, b,
1510 p->nodes[level + 1],
1511 p->slots[level + 1], &b);
1513 free_extent_buffer(b);
1519 BUG_ON(!cow && ins_len);
1520 if (level != btrfs_header_level(b))
1522 level = btrfs_header_level(b);
1524 p->nodes[level] = b;
1525 if (!p->skip_locking)
1526 p->locks[level] = 1;
1528 btrfs_clear_path_blocking(p, NULL);
1531 * we have a lock on b and as long as we aren't changing
1532 * the tree, there is no way for the items in b to change.
1533 * It is safe to drop the lock on our parent before we
1534 * go through the expensive btree search on b.
1536 * If cow is true, then we might be changing slot zero,
1537 * which may require changing the parent. So, we can't
1538 * drop the lock until after we know which slot we're operating on.
1542 btrfs_unlock_up_safe(p, level + 1);
1544 ret = check_block(root, p, level);
1550 ret = bin_search(b, key, level, &slot);
1553 if (ret && slot > 0)
1555 p->slots[level] = slot;
1556 if ((p->search_for_split || ins_len > 0) &&
1557 btrfs_header_nritems(b) >=
1558 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1561 sret = reada_for_balance(root, p, level);
1565 btrfs_set_path_blocking(p);
1566 sret = split_node(trans, root, p, level);
1567 btrfs_clear_path_blocking(p, NULL);
1574 b = p->nodes[level];
1575 slot = p->slots[level];
1576 } else if (ins_len < 0 &&
1577 btrfs_header_nritems(b) <
1578 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
1581 sret = reada_for_balance(root, p, level);
1585 btrfs_set_path_blocking(p);
1586 sret = balance_level(trans, root, p, level);
1587 btrfs_clear_path_blocking(p, NULL);
1593 b = p->nodes[level];
1595 btrfs_release_path(NULL, p);
1598 slot = p->slots[level];
1599 BUG_ON(btrfs_header_nritems(b) == 1);
1601 unlock_up(p, level, lowest_unlock);
1603 /* this is only true while dropping a snapshot */
1604 if (level == lowest_level) {
1609 blocknr = btrfs_node_blockptr(b, slot);
1610 gen = btrfs_node_ptr_generation(b, slot);
1611 blocksize = btrfs_level_size(root, level - 1);
1613 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1614 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1618 * reduce lock contention at high levels
1619 * of the btree by dropping locks before we read.
1623 btrfs_release_path(NULL, p);
1625 free_extent_buffer(tmp);
1627 reada_for_search(root, p,
1631 tmp = read_tree_block(root, blocknr,
1634 free_extent_buffer(tmp);
1637 btrfs_set_path_blocking(p);
1639 free_extent_buffer(tmp);
1641 reada_for_search(root, p,
1644 b = read_node_slot(root, b, slot);
1647 if (!p->skip_locking) {
1650 btrfs_clear_path_blocking(p, NULL);
1651 lret = btrfs_try_spin_lock(b);
1654 btrfs_set_path_blocking(p);
1656 btrfs_clear_path_blocking(p, b);
1660 p->slots[level] = slot;
1662 btrfs_leaf_free_space(root, b) < ins_len) {
1665 btrfs_set_path_blocking(p);
1666 sret = split_leaf(trans, root, key,
1667 p, ins_len, ret == 0);
1668 btrfs_clear_path_blocking(p, NULL);
1676 if (!p->search_for_split)
1677 unlock_up(p, level, lowest_unlock);
1684 * we don't really know what they plan on doing with the path
1685 * from here on, so for now just mark it as blocking
1687 btrfs_set_path_blocking(p);
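/*
 * A hedged usage sketch for btrfs_search_slot() (illustrative only;
 * 'data_size' is a hypothetical item payload size). A read-only lookup
 * passes a NULL trans, ins_len = 0 and cow = 0. An insert asks for the room
 * the new item will need so nodes and leaves get split on the way down:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path,
 *				data_size + sizeof(struct btrfs_item), 1);
 *
 * ret < 0 is an error, ret == 0 means the key was found at path->slots[0],
 * and ret == 1 means it was not found and path->slots[0] is where it would
 * be inserted.
 */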
1691 int btrfs_merge_path(struct btrfs_trans_handle *trans,
1692 struct btrfs_root *root,
1693 struct btrfs_key *node_keys,
1694 u64 *nodes, int lowest_level)
1696 struct extent_buffer *eb;
1697 struct extent_buffer *parent;
1698 struct btrfs_key key;
1707 eb = btrfs_lock_root_node(root);
1708 ret = btrfs_cow_block(trans, root, eb, NULL, 0, &eb);
1711 btrfs_set_lock_blocking(eb);
1715 level = btrfs_header_level(parent);
1716 if (level == 0 || level <= lowest_level)
1719 ret = bin_search(parent, &node_keys[lowest_level], level,
1721 if (ret && slot > 0)
1724 bytenr = btrfs_node_blockptr(parent, slot);
1725 if (nodes[level - 1] == bytenr)
1728 blocksize = btrfs_level_size(root, level - 1);
1729 generation = btrfs_node_ptr_generation(parent, slot);
1730 btrfs_node_key_to_cpu(eb, &key, slot);
1731 key_match = !memcmp(&key, &node_keys[level - 1], sizeof(key));
1733 if (generation == trans->transid) {
1734 eb = read_tree_block(root, bytenr, blocksize,
1736 btrfs_tree_lock(eb);
1737 btrfs_set_lock_blocking(eb);
1741 * if node keys match and node pointer hasn't been modified
1742 * in the running transaction, we can merge the path. For
1743 * blocks owned by reloc trees, the node pointer check is
1744 * skipped; this is because these blocks are fully controlled
1745 * by the space balance code, no one else can modify them.
1747 if (!nodes[level - 1] || !key_match ||
1748 (generation == trans->transid &&
1749 btrfs_header_owner(eb) != BTRFS_TREE_RELOC_OBJECTID)) {
1750 if (level == 1 || level == lowest_level + 1) {
1751 if (generation == trans->transid) {
1752 btrfs_tree_unlock(eb);
1753 free_extent_buffer(eb);
1758 if (generation != trans->transid) {
1759 eb = read_tree_block(root, bytenr, blocksize,
1761 btrfs_tree_lock(eb);
1762 btrfs_set_lock_blocking(eb);
1765 ret = btrfs_cow_block(trans, root, eb, parent, slot,
1769 if (root->root_key.objectid ==
1770 BTRFS_TREE_RELOC_OBJECTID) {
1771 if (!nodes[level - 1]) {
1772 nodes[level - 1] = eb->start;
1773 memcpy(&node_keys[level - 1], &key,
1774 sizeof(node_keys[0]));
1780 btrfs_tree_unlock(parent);
1781 free_extent_buffer(parent);
1786 btrfs_set_node_blockptr(parent, slot, nodes[level - 1]);
1787 btrfs_set_node_ptr_generation(parent, slot, trans->transid);
1788 btrfs_mark_buffer_dirty(parent);
1790 ret = btrfs_inc_extent_ref(trans, root,
1792 blocksize, parent->start,
1793 btrfs_header_owner(parent),
1794 btrfs_header_generation(parent),
1799 * If the block was created in the running transaction,
1800 * it's possible this is the last reference to it, so we
1801 * should drop the subtree.
1803 if (generation == trans->transid) {
1804 ret = btrfs_drop_subtree(trans, root, eb, parent);
1806 btrfs_tree_unlock(eb);
1807 free_extent_buffer(eb);
1809 ret = btrfs_free_extent(trans, root, bytenr,
1810 blocksize, parent->start,
1811 btrfs_header_owner(parent),
1812 btrfs_header_generation(parent),
1818 btrfs_tree_unlock(parent);
1819 free_extent_buffer(parent);
1824 * adjust the pointers going up the tree, starting at level
1825 * making sure the right key of each node points to 'key'.
1826 * This is used after shifting pointers to the left, so it stops
1827 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
1830 * If this fails to write a tree block, it returns -1, but continues
1831 * fixing up the blocks in ram so the tree is consistent.
1833 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1834 struct btrfs_root *root, struct btrfs_path *path,
1835 struct btrfs_disk_key *key, int level)
1839 struct extent_buffer *t;
1841 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1842 int tslot = path->slots[i];
1843 if (!path->nodes[i])
1846 btrfs_set_node_key(t, key, tslot);
1847 btrfs_mark_buffer_dirty(path->nodes[i]);
1857 * This function isn't completely safe. It's the caller's responsibility
1858 * to ensure that the new key won't break the order
1860 int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1861 struct btrfs_root *root, struct btrfs_path *path,
1862 struct btrfs_key *new_key)
1864 struct btrfs_disk_key disk_key;
1865 struct extent_buffer *eb;
1868 eb = path->nodes[0];
1869 slot = path->slots[0];
1871 btrfs_item_key(eb, &disk_key, slot - 1);
1872 if (comp_keys(&disk_key, new_key) >= 0)
1875 if (slot < btrfs_header_nritems(eb) - 1) {
1876 btrfs_item_key(eb, &disk_key, slot + 1);
1877 if (comp_keys(&disk_key, new_key) <= 0)
1881 btrfs_cpu_key_to_disk(&disk_key, new_key);
1882 btrfs_set_item_key(eb, &disk_key, slot);
1883 btrfs_mark_buffer_dirty(eb);
1885 fixup_low_keys(trans, root, path, &disk_key, 1);
1890 * try to push data from one node into the next node left in the tree.
1893 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1894 * error, and > 0 if there was no room in the left hand block.
1896 static int push_node_left(struct btrfs_trans_handle *trans,
1897 struct btrfs_root *root, struct extent_buffer *dst,
1898 struct extent_buffer *src, int empty)
1905 src_nritems = btrfs_header_nritems(src);
1906 dst_nritems = btrfs_header_nritems(dst);
1907 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1908 WARN_ON(btrfs_header_generation(src) != trans->transid);
1909 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1911 if (!empty && src_nritems <= 8)
1914 if (push_items <= 0)
1918 push_items = min(src_nritems, push_items);
1919 if (push_items < src_nritems) {
1920 /* leave at least 8 pointers in the node if
1921 * we aren't going to empty it
1923 if (src_nritems - push_items < 8) {
1924 if (push_items <= 8)
1930 push_items = min(src_nritems - 8, push_items);
1932 copy_extent_buffer(dst, src,
1933 btrfs_node_key_ptr_offset(dst_nritems),
1934 btrfs_node_key_ptr_offset(0),
1935 push_items * sizeof(struct btrfs_key_ptr));
1937 if (push_items < src_nritems) {
1938 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1939 btrfs_node_key_ptr_offset(push_items),
1940 (src_nritems - push_items) *
1941 sizeof(struct btrfs_key_ptr));
1943 btrfs_set_header_nritems(src, src_nritems - push_items);
1944 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1945 btrfs_mark_buffer_dirty(src);
1946 btrfs_mark_buffer_dirty(dst);
1948 ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items);
1955 * try to push data from one node into the next node right in the tree.
1958 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1959 * error, and > 0 if there was no room in the right hand block.
1961 * this will only push up to 1/2 the contents of the left node over
1963 static int balance_node_right(struct btrfs_trans_handle *trans,
1964 struct btrfs_root *root,
1965 struct extent_buffer *dst,
1966 struct extent_buffer *src)
1974 WARN_ON(btrfs_header_generation(src) != trans->transid);
1975 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1977 src_nritems = btrfs_header_nritems(src);
1978 dst_nritems = btrfs_header_nritems(dst);
1979 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1980 if (push_items <= 0)
1983 if (src_nritems < 4)
1986 max_push = src_nritems / 2 + 1;
1987 /* don't try to empty the node */
1988 if (max_push >= src_nritems)
1991 if (max_push < push_items)
1992 push_items = max_push;
1994 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1995 btrfs_node_key_ptr_offset(0),
1997 sizeof(struct btrfs_key_ptr));
1999 copy_extent_buffer(dst, src,
2000 btrfs_node_key_ptr_offset(0),
2001 btrfs_node_key_ptr_offset(src_nritems - push_items),
2002 push_items * sizeof(struct btrfs_key_ptr));
2004 btrfs_set_header_nritems(src, src_nritems - push_items);
2005 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2007 btrfs_mark_buffer_dirty(src);
2008 btrfs_mark_buffer_dirty(dst);
2010 ret = btrfs_update_ref(trans, root, src, dst, 0, push_items);
2017 * helper function to insert a new root level in the tree.
2018 * A new node is allocated, and a single item is inserted to
2019 * point to the existing root
2021 * returns zero on success or < 0 on failure.
2023 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2024 struct btrfs_root *root,
2025 struct btrfs_path *path, int level)
2028 struct extent_buffer *lower;
2029 struct extent_buffer *c;
2030 struct extent_buffer *old;
2031 struct btrfs_disk_key lower_key;
2034 BUG_ON(path->nodes[level]);
2035 BUG_ON(path->nodes[level-1] != root->node);
2037 lower = path->nodes[level-1];
2039 btrfs_item_key(lower, &lower_key, 0);
2041 btrfs_node_key(lower, &lower_key, 0);
2043 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2044 root->root_key.objectid, trans->transid,
2045 level, root->node->start, 0);
2049 memset_extent_buffer(c, 0, 0, root->nodesize);
2050 btrfs_set_header_nritems(c, 1);
2051 btrfs_set_header_level(c, level);
2052 btrfs_set_header_bytenr(c, c->start);
2053 btrfs_set_header_generation(c, trans->transid);
2054 btrfs_set_header_owner(c, root->root_key.objectid);
2056 write_extent_buffer(c, root->fs_info->fsid,
2057 (unsigned long)btrfs_header_fsid(c),
2060 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2061 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2064 btrfs_set_node_key(c, &lower_key, 0);
2065 btrfs_set_node_blockptr(c, 0, lower->start);
2066 lower_gen = btrfs_header_generation(lower);
2067 WARN_ON(lower_gen != trans->transid);
2069 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2071 btrfs_mark_buffer_dirty(c);
2073 spin_lock(&root->node_lock);
2076 spin_unlock(&root->node_lock);
2078 ret = btrfs_update_extent_ref(trans, root, lower->start,
2079 lower->len, lower->start, c->start,
2080 root->root_key.objectid,
2081 trans->transid, level - 1);
2084 /* the super has an extra ref to root->node */
2085 free_extent_buffer(old);
2087 add_root_to_dirty_list(root);
2088 extent_buffer_get(c);
2089 path->nodes[level] = c;
2090 path->locks[level] = 1;
2091 path->slots[level] = 0;
2096 * worker function to insert a single pointer in a node.
2097 * the node should have enough room for the pointer already
2099 * slot and level indicate where you want the key to go, and
2100 * blocknr is the block the key points to.
2102 * returns zero on success and < 0 on any error
2104 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2105 *root, struct btrfs_path *path, struct btrfs_disk_key
2106 *key, u64 bytenr, int slot, int level)
2108 struct extent_buffer *lower;
2111 BUG_ON(!path->nodes[level]);
2112 lower = path->nodes[level];
2113 nritems = btrfs_header_nritems(lower);
2116 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
2118 if (slot != nritems) {
2119 memmove_extent_buffer(lower,
2120 btrfs_node_key_ptr_offset(slot + 1),
2121 btrfs_node_key_ptr_offset(slot),
2122 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2124 btrfs_set_node_key(lower, key, slot);
2125 btrfs_set_node_blockptr(lower, slot, bytenr);
2126 WARN_ON(trans->transid == 0);
2127 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2128 btrfs_set_header_nritems(lower, nritems + 1);
2129 btrfs_mark_buffer_dirty(lower);
2134 * split the node at the specified level in path in two.
2135 * The path is corrected to point to the appropriate node after the split
2137 * Before splitting this tries to make some room in the node by pushing
2138 * left and right, if either one works, it returns right away.
2140 * returns 0 on success and < 0 on failure
2142 static noinline int split_node(struct btrfs_trans_handle *trans,
2143 struct btrfs_root *root,
2144 struct btrfs_path *path, int level)
2146 struct extent_buffer *c;
2147 struct extent_buffer *split;
2148 struct btrfs_disk_key disk_key;
2154 c = path->nodes[level];
2155 WARN_ON(btrfs_header_generation(c) != trans->transid);
2156 if (c == root->node) {
2157 /* trying to split the root, let's make a new one */
2158 ret = insert_new_root(trans, root, path, level + 1);
2162 ret = push_nodes_for_insert(trans, root, path, level);
2163 c = path->nodes[level];
2164 if (!ret && btrfs_header_nritems(c) <
2165 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2171 c_nritems = btrfs_header_nritems(c);
2173 split = btrfs_alloc_free_block(trans, root, root->nodesize,
2174 path->nodes[level + 1]->start,
2175 root->root_key.objectid,
2176 trans->transid, level, c->start, 0);
2178 return PTR_ERR(split);
2180 btrfs_set_header_flags(split, btrfs_header_flags(c));
2181 btrfs_set_header_level(split, btrfs_header_level(c));
2182 btrfs_set_header_bytenr(split, split->start);
2183 btrfs_set_header_generation(split, trans->transid);
2184 btrfs_set_header_owner(split, root->root_key.objectid);
2185 btrfs_set_header_flags(split, 0);
2186 write_extent_buffer(split, root->fs_info->fsid,
2187 (unsigned long)btrfs_header_fsid(split),
2189 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2190 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2193 mid = (c_nritems + 1) / 2;
2195 copy_extent_buffer(split, c,
2196 btrfs_node_key_ptr_offset(0),
2197 btrfs_node_key_ptr_offset(mid),
2198 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2199 btrfs_set_header_nritems(split, c_nritems - mid);
2200 btrfs_set_header_nritems(c, mid);
2203 btrfs_mark_buffer_dirty(c);
2204 btrfs_mark_buffer_dirty(split);
2206 btrfs_node_key(split, &disk_key, 0);
2207 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2208 path->slots[level + 1] + 1,
2213 ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid);
2216 if (path->slots[level] >= mid) {
2217 path->slots[level] -= mid;
2218 btrfs_tree_unlock(c);
2219 free_extent_buffer(c);
2220 path->nodes[level] = split;
2221 path->slots[level + 1] += 1;
2223 btrfs_tree_unlock(split);
2224 free_extent_buffer(split);
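	/*
	 * Illustrative arithmetic for the split point above: with
	 * mid = (c_nritems + 1) / 2, a node holding 101 pointers keeps the
	 * first 51 in c and moves the remaining 50 into split; the path then
	 * follows whichever half contains the original slot.
	 */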
2230 * how many bytes are required to store the items in a leaf. start
2231 * and nr indicate which items in the leaf to check. This totals up the
2232 * space used both by the item structs and the item data
2234 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2237 int nritems = btrfs_header_nritems(l);
2238 int end = min(nritems, start + nr) - 1;
2242 data_len = btrfs_item_end_nr(l, start);
2243 data_len = data_len - btrfs_item_offset_nr(l, end);
2244 data_len += sizeof(struct btrfs_item) * nr;
2245 WARN_ON(data_len < 0);
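/*
 * An illustrative computation with hypothetical numbers: for start = 0 and
 * nr = 2, if btrfs_item_end_nr(l, 0) = 3995 and btrfs_item_offset_nr(l, 1)
 * = 3900, the item data spans 95 bytes, so leaf_space_used() returns
 * 95 + 2 * sizeof(struct btrfs_item).
 */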
2250 * The space between the end of the leaf items and
2251 * the start of the leaf data. IOW, how much room
2252 * the leaf has left for both items and data
2254 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2255 struct extent_buffer *leaf)
2257 int nritems = btrfs_header_nritems(leaf);
2259 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2261 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2262 "used %d nritems %d\n",
2263 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2264 leaf_space_used(leaf, 0, nritems), nritems);
2270 * push some data in the path leaf to the right, trying to free up at
2271 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2273 * returns 1 if the push failed because the other node didn't have enough
2274 * room, 0 if everything worked out and < 0 if there were major errors.
2276 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2277 *root, struct btrfs_path *path, int data_size,
2280 struct extent_buffer *left = path->nodes[0];
2281 struct extent_buffer *right;
2282 struct extent_buffer *upper;
2283 struct btrfs_disk_key disk_key;
2289 struct btrfs_item *item;
2297 slot = path->slots[1];
2298 if (!path->nodes[1])
2301 upper = path->nodes[1];
2302 if (slot >= btrfs_header_nritems(upper) - 1)
2305 btrfs_assert_tree_locked(path->nodes[1]);
2307 right = read_node_slot(root, upper, slot + 1);
2308 btrfs_tree_lock(right);
2309 btrfs_set_lock_blocking(right);
2311 free_space = btrfs_leaf_free_space(root, right);
2312 if (free_space < data_size)
2315 /* cow and double check */
2316 ret = btrfs_cow_block(trans, root, right, upper,
2321 free_space = btrfs_leaf_free_space(root, right);
2322 if (free_space < data_size)
2325 left_nritems = btrfs_header_nritems(left);
2326 if (left_nritems == 0)
2334 if (path->slots[0] >= left_nritems)
2335 push_space += data_size;
2337 i = left_nritems - 1;
2339 item = btrfs_item_nr(left, i);
2341 if (!empty && push_items > 0) {
2342 if (path->slots[0] > i)
2344 if (path->slots[0] == i) {
2345 int space = btrfs_leaf_free_space(root, left);
2346 if (space + push_space * 2 > free_space)
2351 if (path->slots[0] == i)
2352 push_space += data_size;
2354 if (!left->map_token) {
2355 map_extent_buffer(left, (unsigned long)item,
2356 sizeof(struct btrfs_item),
2357 &left->map_token, &left->kaddr,
2358 &left->map_start, &left->map_len,
2362 this_item_size = btrfs_item_size(left, item);
2363 if (this_item_size + sizeof(*item) + push_space > free_space)
2367 push_space += this_item_size + sizeof(*item);
2372 if (left->map_token) {
2373 unmap_extent_buffer(left, left->map_token, KM_USER1);
2374 left->map_token = NULL;
2377 if (push_items == 0)
2380 if (!empty && push_items == left_nritems)
2383 /* push left to right */
2384 right_nritems = btrfs_header_nritems(right);
2386 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2387 push_space -= leaf_data_end(root, left);
2389 /* make room in the right data area */
2390 data_end = leaf_data_end(root, right);
2391 memmove_extent_buffer(right,
2392 btrfs_leaf_data(right) + data_end - push_space,
2393 btrfs_leaf_data(right) + data_end,
2394 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2396 /* copy from the left data area */
2397 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2398 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2399 btrfs_leaf_data(left) + leaf_data_end(root, left),
2402 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2403 btrfs_item_nr_offset(0),
2404 right_nritems * sizeof(struct btrfs_item));
2406 /* copy the items from left to right */
2407 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2408 btrfs_item_nr_offset(left_nritems - push_items),
2409 push_items * sizeof(struct btrfs_item));
2411 /* update the item pointers */
2412 right_nritems += push_items;
2413 btrfs_set_header_nritems(right, right_nritems);
2414 push_space = BTRFS_LEAF_DATA_SIZE(root);
2415 for (i = 0; i < right_nritems; i++) {
2416 item = btrfs_item_nr(right, i);
2417 if (!right->map_token) {
2418 map_extent_buffer(right, (unsigned long)item,
2419 sizeof(struct btrfs_item),
2420 &right->map_token, &right->kaddr,
2421 &right->map_start, &right->map_len,
2424 push_space -= btrfs_item_size(right, item);
2425 btrfs_set_item_offset(right, item, push_space);
2428 if (right->map_token) {
2429 unmap_extent_buffer(right, right->map_token, KM_USER1);
2430 right->map_token = NULL;
2432 left_nritems -= push_items;
2433 btrfs_set_header_nritems(left, left_nritems);
2436 btrfs_mark_buffer_dirty(left);
2437 btrfs_mark_buffer_dirty(right);
2439 ret = btrfs_update_ref(trans, root, left, right, 0, push_items);
2442 btrfs_item_key(right, &disk_key, 0);
2443 btrfs_set_node_key(upper, &disk_key, slot + 1);
2444 btrfs_mark_buffer_dirty(upper);
2446 /* then fixup the leaf pointer in the path */
2447 if (path->slots[0] >= left_nritems) {
2448 path->slots[0] -= left_nritems;
2449 if (btrfs_header_nritems(path->nodes[0]) == 0)
2450 clean_tree_block(trans, root, path->nodes[0]);
2451 btrfs_tree_unlock(path->nodes[0]);
2452 free_extent_buffer(path->nodes[0]);
2453 path->nodes[0] = right;
2454 path->slots[1] += 1;
2456 btrfs_tree_unlock(right);
2457 free_extent_buffer(right);
2462 btrfs_tree_unlock(right);
2463 free_extent_buffer(right);
2468 * push some data in the path leaf to the left, trying to free up at
2469 * least data_size bytes. returns zero if the push worked, nonzero otherwise
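/*
 * Editor's sketch (not part of the original source): callers in this file
 * invoke the push helpers with a byte target and an "empty" flag, roughly
 *
 *	wret = push_leaf_left(trans, root, path, data_size, 0);
 *		try to free data_size bytes, but keep some items in the leaf
 *	wret = push_leaf_left(trans, root, path, 1, 1);
 *		delete path: try to move everything out of an emptying leaf
 *
 * A zero return means enough room was freed; a nonzero return means the
 * push could not free enough and the caller falls back to pushing the
 * other way or splitting the leaf.
 */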
2471 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2472 *root, struct btrfs_path *path, int data_size,
2475 struct btrfs_disk_key disk_key;
2476 struct extent_buffer *right = path->nodes[0];
2477 struct extent_buffer *left;
2483 struct btrfs_item *item;
2484 u32 old_left_nritems;
2490 u32 old_left_item_size;
2492 slot = path->slots[1];
2495 if (!path->nodes[1])
2498 right_nritems = btrfs_header_nritems(right);
2499 if (right_nritems == 0)
2502 btrfs_assert_tree_locked(path->nodes[1]);
2504 left = read_node_slot(root, path->nodes[1], slot - 1);
2505 btrfs_tree_lock(left);
2506 btrfs_set_lock_blocking(left);
2508 free_space = btrfs_leaf_free_space(root, left);
2509 if (free_space < data_size) {
2514 /* cow and double check */
2515 ret = btrfs_cow_block(trans, root, left,
2516 path->nodes[1], slot - 1, &left);
2518 /* we hit -ENOSPC, but it isn't fatal here */
2523 free_space = btrfs_leaf_free_space(root, left);
2524 if (free_space < data_size) {
2532 nr = right_nritems - 1;
2534 for (i = 0; i < nr; i++) {
2535 item = btrfs_item_nr(right, i);
2536 if (!right->map_token) {
2537 map_extent_buffer(right, (unsigned long)item,
2538 sizeof(struct btrfs_item),
2539 &right->map_token, &right->kaddr,
2540 &right->map_start, &right->map_len,
2544 if (!empty && push_items > 0) {
2545 if (path->slots[0] < i)
2547 if (path->slots[0] == i) {
2548 int space = btrfs_leaf_free_space(root, right);
2549 if (space + push_space * 2 > free_space)
2554 if (path->slots[0] == i)
2555 push_space += data_size;
2557 this_item_size = btrfs_item_size(right, item);
2558 if (this_item_size + sizeof(*item) + push_space > free_space)
2562 push_space += this_item_size + sizeof(*item);
2565 if (right->map_token) {
2566 unmap_extent_buffer(right, right->map_token, KM_USER1);
2567 right->map_token = NULL;
2570 if (push_items == 0) {
2574 if (!empty && push_items == btrfs_header_nritems(right))
2577 /* push data from right to left */
2578 copy_extent_buffer(left, right,
2579 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2580 btrfs_item_nr_offset(0),
2581 push_items * sizeof(struct btrfs_item));
2583 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2584 btrfs_item_offset_nr(right, push_items - 1);
2586 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2587 leaf_data_end(root, left) - push_space,
2588 btrfs_leaf_data(right) +
2589 btrfs_item_offset_nr(right, push_items - 1),
2591 old_left_nritems = btrfs_header_nritems(left);
2592 BUG_ON(old_left_nritems <= 0);
2594 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2595 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2598 item = btrfs_item_nr(left, i);
2599 if (!left->map_token) {
2600 map_extent_buffer(left, (unsigned long)item,
2601 sizeof(struct btrfs_item),
2602 &left->map_token, &left->kaddr,
2603 &left->map_start, &left->map_len,
2607 ioff = btrfs_item_offset(left, item);
2608 btrfs_set_item_offset(left, item,
2609 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2611 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2612 if (left->map_token) {
2613 unmap_extent_buffer(left, left->map_token, KM_USER1);
2614 left->map_token = NULL;
2617 /* fixup right node */
2618 if (push_items > right_nritems) {
2619 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2624 if (push_items < right_nritems) {
2625 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2626 leaf_data_end(root, right);
2627 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2628 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2629 btrfs_leaf_data(right) +
2630 leaf_data_end(root, right), push_space);
2632 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2633 btrfs_item_nr_offset(push_items),
2634 (btrfs_header_nritems(right) - push_items) *
2635 sizeof(struct btrfs_item));
2637 right_nritems -= push_items;
2638 btrfs_set_header_nritems(right, right_nritems);
2639 push_space = BTRFS_LEAF_DATA_SIZE(root);
2640 for (i = 0; i < right_nritems; i++) {
2641 item = btrfs_item_nr(right, i);
2643 if (!right->map_token) {
2644 map_extent_buffer(right, (unsigned long)item,
2645 sizeof(struct btrfs_item),
2646 &right->map_token, &right->kaddr,
2647 &right->map_start, &right->map_len,
2651 push_space = push_space - btrfs_item_size(right, item);
2652 btrfs_set_item_offset(right, item, push_space);
2654 if (right->map_token) {
2655 unmap_extent_buffer(right, right->map_token, KM_USER1);
2656 right->map_token = NULL;
2659 btrfs_mark_buffer_dirty(left);
2661 btrfs_mark_buffer_dirty(right);
2663 ret = btrfs_update_ref(trans, root, right, left,
2664 old_left_nritems, push_items);
2667 btrfs_item_key(right, &disk_key, 0);
2668 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2672 /* then fixup the leaf pointer in the path */
2673 if (path->slots[0] < push_items) {
2674 path->slots[0] += old_left_nritems;
2675 if (btrfs_header_nritems(path->nodes[0]) == 0)
2676 clean_tree_block(trans, root, path->nodes[0]);
2677 btrfs_tree_unlock(path->nodes[0]);
2678 free_extent_buffer(path->nodes[0]);
2679 path->nodes[0] = left;
2680 path->slots[1] -= 1;
2682 btrfs_tree_unlock(left);
2683 free_extent_buffer(left);
2684 path->slots[0] -= push_items;
2686 BUG_ON(path->slots[0] < 0);
2689 btrfs_tree_unlock(left);
2690 free_extent_buffer(left);
2695 * split the path's leaf in two, making sure there is at least data_size
2696 * bytes available for the resulting leaf level of the path.
2698 * returns 0 if all went well and < 0 on failure.
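/*
 * Editor's sketch (not part of the original source), with hypothetical
 * numbers: in the common case the upper half of the items moves to a newly
 * allocated right leaf.  If the leaf holds nritems = 9 items, then
 * mid = (9 + 1) / 2 = 5, items 5..8 and their data are copied to the new
 * leaf, the original keeps items 0..4, and the first key of the new leaf
 * is inserted into the parent at path->slots[1] + 1.  The special cases
 * below skip the copy entirely when the insertion slot is past the end of
 * the leaf or at slot 0, and just chain in an empty leaf instead.
 */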
2700 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2701 struct btrfs_root *root,
2702 struct btrfs_key *ins_key,
2703 struct btrfs_path *path, int data_size,
2706 struct extent_buffer *l;
2710 struct extent_buffer *right;
2717 int num_doubles = 0;
2718 struct btrfs_disk_key disk_key;
2720 /* first try to make some room by pushing left and right */
2721 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2722 wret = push_leaf_right(trans, root, path, data_size, 0);
2726 wret = push_leaf_left(trans, root, path, data_size, 0);
2732 /* did the pushes work? */
2733 if (btrfs_leaf_free_space(root, l) >= data_size)
2737 if (!path->nodes[1]) {
2738 ret = insert_new_root(trans, root, path, 1);
2745 slot = path->slots[0];
2746 nritems = btrfs_header_nritems(l);
2747 mid = (nritems + 1) / 2;
2749 right = btrfs_alloc_free_block(trans, root, root->leafsize,
2750 path->nodes[1]->start,
2751 root->root_key.objectid,
2752 trans->transid, 0, l->start, 0);
2753 if (IS_ERR(right)) {
2755 return PTR_ERR(right);
2758 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2759 btrfs_set_header_bytenr(right, right->start);
2760 btrfs_set_header_generation(right, trans->transid);
2761 btrfs_set_header_owner(right, root->root_key.objectid);
2762 btrfs_set_header_level(right, 0);
2763 write_extent_buffer(right, root->fs_info->fsid,
2764 (unsigned long)btrfs_header_fsid(right),
2767 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2768 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2772 leaf_space_used(l, mid, nritems - mid) + data_size >
2773 BTRFS_LEAF_DATA_SIZE(root)) {
2774 if (slot >= nritems) {
2775 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2776 btrfs_set_header_nritems(right, 0);
2777 wret = insert_ptr(trans, root, path,
2778 &disk_key, right->start,
2779 path->slots[1] + 1, 1);
2783 btrfs_tree_unlock(path->nodes[0]);
2784 free_extent_buffer(path->nodes[0]);
2785 path->nodes[0] = right;
2787 path->slots[1] += 1;
2788 btrfs_mark_buffer_dirty(right);
2792 if (mid != nritems &&
2793 leaf_space_used(l, mid, nritems - mid) +
2794 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2799 if (leaf_space_used(l, 0, mid) + data_size >
2800 BTRFS_LEAF_DATA_SIZE(root)) {
2801 if (!extend && data_size && slot == 0) {
2802 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2803 btrfs_set_header_nritems(right, 0);
2804 wret = insert_ptr(trans, root, path,
2810 btrfs_tree_unlock(path->nodes[0]);
2811 free_extent_buffer(path->nodes[0]);
2812 path->nodes[0] = right;
2814 if (path->slots[1] == 0) {
2815 wret = fixup_low_keys(trans, root,
2816 path, &disk_key, 1);
2820 btrfs_mark_buffer_dirty(right);
2822 } else if ((extend || !data_size) && slot == 0) {
2826 if (mid != nritems &&
2827 leaf_space_used(l, mid, nritems - mid) +
2828 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2834 nritems = nritems - mid;
2835 btrfs_set_header_nritems(right, nritems);
2836 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2838 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2839 btrfs_item_nr_offset(mid),
2840 nritems * sizeof(struct btrfs_item));
2842 copy_extent_buffer(right, l,
2843 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2844 data_copy_size, btrfs_leaf_data(l) +
2845 leaf_data_end(root, l), data_copy_size);
2847 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2848 btrfs_item_end_nr(l, mid);
2850 for (i = 0; i < nritems; i++) {
2851 struct btrfs_item *item = btrfs_item_nr(right, i);
2854 if (!right->map_token) {
2855 map_extent_buffer(right, (unsigned long)item,
2856 sizeof(struct btrfs_item),
2857 &right->map_token, &right->kaddr,
2858 &right->map_start, &right->map_len,
2862 ioff = btrfs_item_offset(right, item);
2863 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2866 if (right->map_token) {
2867 unmap_extent_buffer(right, right->map_token, KM_USER1);
2868 right->map_token = NULL;
2871 btrfs_set_header_nritems(l, mid);
2873 btrfs_item_key(right, &disk_key, 0);
2874 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2875 path->slots[1] + 1, 1);
2879 btrfs_mark_buffer_dirty(right);
2880 btrfs_mark_buffer_dirty(l);
2881 BUG_ON(path->slots[0] != slot);
2883 ret = btrfs_update_ref(trans, root, l, right, 0, nritems);
2887 btrfs_tree_unlock(path->nodes[0]);
2888 free_extent_buffer(path->nodes[0]);
2889 path->nodes[0] = right;
2890 path->slots[0] -= mid;
2891 path->slots[1] += 1;
2893 btrfs_tree_unlock(right);
2894 free_extent_buffer(right);
2897 BUG_ON(path->slots[0] < 0);
2900 BUG_ON(num_doubles != 0);
2908 * This function splits a single item into two items,
2909 * giving 'new_key' to the new item and splitting the
2910 * old one at split_offset (from the start of the item).
2912 * The path may be released by this operation. After
2913 * the split, the path is pointing to the old item. The
2914 * new item is going to be in the same node as the old one.
2916 * Note: the item being split must be small enough to live alone on
2917 * a tree block with room for one extra struct btrfs_item
2919 * This allows us to split the item in place, keeping a lock on the
2920 * leaf the entire time.
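/*
 * Editor's sketch (not part of the original source), with hypothetical
 * numbers: splitting a 100 byte item at split_offset = 40, when its data
 * starts at leaf offset 3000, leaves the old item with size 40 at offset
 * 3000 + 100 - 40 = 3060 and creates the new item (carrying new_key) in
 * the following slot with size 60 at offset 3000.  The payload is staged
 * in a temporary buffer and written back so the first 40 bytes belong to
 * the old item and the remaining 60 bytes to the new one, all inside the
 * space the original item already occupied.
 */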
2922 int btrfs_split_item(struct btrfs_trans_handle *trans,
2923 struct btrfs_root *root,
2924 struct btrfs_path *path,
2925 struct btrfs_key *new_key,
2926 unsigned long split_offset)
2929 struct extent_buffer *leaf;
2930 struct btrfs_key orig_key;
2931 struct btrfs_item *item;
2932 struct btrfs_item *new_item;
2937 struct btrfs_disk_key disk_key;
2940 leaf = path->nodes[0];
2941 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
2942 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
2945 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2946 btrfs_release_path(root, path);
2948 path->search_for_split = 1;
2949 path->keep_locks = 1;
2951 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
2952 path->search_for_split = 0;
2954 /* if our item isn't there or got smaller, return now */
2955 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
2957 path->keep_locks = 0;
2961 ret = split_leaf(trans, root, &orig_key, path,
2962 sizeof(struct btrfs_item), 1);
2963 path->keep_locks = 0;
2967 * make sure any changes to the path from split_leaf leave it
2968 * in a blocking state
2970 btrfs_set_path_blocking(path);
2972 leaf = path->nodes[0];
2973 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2976 item = btrfs_item_nr(leaf, path->slots[0]);
2977 orig_offset = btrfs_item_offset(leaf, item);
2978 item_size = btrfs_item_size(leaf, item);
2981 buf = kmalloc(item_size, GFP_NOFS);
2982 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
2983 path->slots[0]), item_size);
2984 slot = path->slots[0] + 1;
2985 leaf = path->nodes[0];
2987 nritems = btrfs_header_nritems(leaf);
2989 if (slot != nritems) {
2990 /* shift the items */
2991 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
2992 btrfs_item_nr_offset(slot),
2993 (nritems - slot) * sizeof(struct btrfs_item));
2997 btrfs_cpu_key_to_disk(&disk_key, new_key);
2998 btrfs_set_item_key(leaf, &disk_key, slot);
3000 new_item = btrfs_item_nr(leaf, slot);
3002 btrfs_set_item_offset(leaf, new_item, orig_offset);
3003 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3005 btrfs_set_item_offset(leaf, item,
3006 orig_offset + item_size - split_offset);
3007 btrfs_set_item_size(leaf, item, split_offset);
3009 btrfs_set_header_nritems(leaf, nritems + 1);
3011 /* write the data for the start of the original item */
3012 write_extent_buffer(leaf, buf,
3013 btrfs_item_ptr_offset(leaf, path->slots[0]),
3016 /* write the data for the new item */
3017 write_extent_buffer(leaf, buf + split_offset,
3018 btrfs_item_ptr_offset(leaf, slot),
3019 item_size - split_offset);
3020 btrfs_mark_buffer_dirty(leaf);
3023 if (btrfs_leaf_free_space(root, leaf) < 0) {
3024 btrfs_print_leaf(root, leaf);
3032 * make the item pointed to by the path smaller. new_size indicates
3033 * how small to make it, and from_end tells us if we just chop bytes
3034 * off the end of the item or if we shift the item to chop bytes off the front.
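/*
 * Editor's sketch (not part of the original source), with hypothetical
 * numbers: shrinking an item from old_size = 100 to new_size = 64 gives
 * size_diff = 36.  Item data is packed at the end of the leaf, so the kept
 * bytes of this item, together with the data of every later item, slide 36
 * bytes toward the end of the block, the data offsets of this item and
 * every later item are raised by 36, and the free area in the middle of
 * the leaf grows by the same amount.  When from_end is zero the bytes are
 * dropped from the front instead: only the later items' data moves, the
 * item's offset advances past the discarded bytes, and the key offset is
 * bumped to match (with special handling for inline file extents).
 */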
3037 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3038 struct btrfs_root *root,
3039 struct btrfs_path *path,
3040 u32 new_size, int from_end)
3045 struct extent_buffer *leaf;
3046 struct btrfs_item *item;
3048 unsigned int data_end;
3049 unsigned int old_data_start;
3050 unsigned int old_size;
3051 unsigned int size_diff;
3054 slot_orig = path->slots[0];
3055 leaf = path->nodes[0];
3056 slot = path->slots[0];
3058 old_size = btrfs_item_size_nr(leaf, slot);
3059 if (old_size == new_size)
3062 nritems = btrfs_header_nritems(leaf);
3063 data_end = leaf_data_end(root, leaf);
3065 old_data_start = btrfs_item_offset_nr(leaf, slot);
3067 size_diff = old_size - new_size;
3070 BUG_ON(slot >= nritems);
3073 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3075 /* first correct the data pointers */
3076 for (i = slot; i < nritems; i++) {
3078 item = btrfs_item_nr(leaf, i);
3080 if (!leaf->map_token) {
3081 map_extent_buffer(leaf, (unsigned long)item,
3082 sizeof(struct btrfs_item),
3083 &leaf->map_token, &leaf->kaddr,
3084 &leaf->map_start, &leaf->map_len,
3088 ioff = btrfs_item_offset(leaf, item);
3089 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3092 if (leaf->map_token) {
3093 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3094 leaf->map_token = NULL;
3097 /* shift the data */
3099 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3100 data_end + size_diff, btrfs_leaf_data(leaf) +
3101 data_end, old_data_start + new_size - data_end);
3103 struct btrfs_disk_key disk_key;
3106 btrfs_item_key(leaf, &disk_key, slot);
3108 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3110 struct btrfs_file_extent_item *fi;
3112 fi = btrfs_item_ptr(leaf, slot,
3113 struct btrfs_file_extent_item);
3114 fi = (struct btrfs_file_extent_item *)(
3115 (unsigned long)fi - size_diff);
3117 if (btrfs_file_extent_type(leaf, fi) ==
3118 BTRFS_FILE_EXTENT_INLINE) {
3119 ptr = btrfs_item_ptr_offset(leaf, slot);
3120 memmove_extent_buffer(leaf, ptr,
3122 offsetof(struct btrfs_file_extent_item,
3127 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3128 data_end + size_diff, btrfs_leaf_data(leaf) +
3129 data_end, old_data_start - data_end);
3131 offset = btrfs_disk_key_offset(&disk_key);
3132 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3133 btrfs_set_item_key(leaf, &disk_key, slot);
3135 fixup_low_keys(trans, root, path, &disk_key, 1);
3138 item = btrfs_item_nr(leaf, slot);
3139 btrfs_set_item_size(leaf, item, new_size);
3140 btrfs_mark_buffer_dirty(leaf);
3143 if (btrfs_leaf_free_space(root, leaf) < 0) {
3144 btrfs_print_leaf(root, leaf);
3151 * make the item pointed to by the path bigger, data_size is the number of bytes to add.
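/*
 * Editor's sketch (not part of the original source), with hypothetical
 * numbers: growing an item by data_size = 16 bytes lowers the data offset
 * of this item and of every later item by 16, slides their packed data 16
 * bytes toward the front of the leaf, and records the item's size as
 * old_size + 16.  The 16 new bytes appear at the end of the item's data,
 * left for the caller to fill in; the leaf must already have at least 16
 * bytes of free space, and that is checked up front.
 */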
3153 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3154 struct btrfs_root *root, struct btrfs_path *path,
3160 struct extent_buffer *leaf;
3161 struct btrfs_item *item;
3163 unsigned int data_end;
3164 unsigned int old_data;
3165 unsigned int old_size;
3168 slot_orig = path->slots[0];
3169 leaf = path->nodes[0];
3171 nritems = btrfs_header_nritems(leaf);
3172 data_end = leaf_data_end(root, leaf);
3174 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3175 btrfs_print_leaf(root, leaf);
3178 slot = path->slots[0];
3179 old_data = btrfs_item_end_nr(leaf, slot);
3182 if (slot >= nritems) {
3183 btrfs_print_leaf(root, leaf);
3184 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3190 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3192 /* first correct the data pointers */
3193 for (i = slot; i < nritems; i++) {
3195 item = btrfs_item_nr(leaf, i);
3197 if (!leaf->map_token) {
3198 map_extent_buffer(leaf, (unsigned long)item,
3199 sizeof(struct btrfs_item),
3200 &leaf->map_token, &leaf->kaddr,
3201 &leaf->map_start, &leaf->map_len,
3204 ioff = btrfs_item_offset(leaf, item);
3205 btrfs_set_item_offset(leaf, item, ioff - data_size);
3208 if (leaf->map_token) {
3209 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3210 leaf->map_token = NULL;
3213 /* shift the data */
3214 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3215 data_end - data_size, btrfs_leaf_data(leaf) +
3216 data_end, old_data - data_end);
3218 data_end = old_data;
3219 old_size = btrfs_item_size_nr(leaf, slot);
3220 item = btrfs_item_nr(leaf, slot);
3221 btrfs_set_item_size(leaf, item, old_size + data_size);
3222 btrfs_mark_buffer_dirty(leaf);
3225 if (btrfs_leaf_free_space(root, leaf) < 0) {
3226 btrfs_print_leaf(root, leaf);
3233 * Given a key and some data, insert items into the tree.
3234 * This does all the path init required, making room in the tree if needed.
3235 * Returns the number of keys that were inserted.
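/*
 * Editor's sketch (not part of the original source): a hypothetical caller
 * inserting two adjacent items in one pass might look like
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { 32, 48 };
 *	int ret;
 *
 *	(fill in keys[0] and keys[1] in sorted key order)
 *	ret = btrfs_insert_some_items(trans, root, path, keys, sizes, 2);
 *
 * On success ret is the number of items that actually fit in the leaf; a
 * caller that gets back less than 2 has to retry with the remaining keys.
 */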
3237 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3238 struct btrfs_root *root,
3239 struct btrfs_path *path,
3240 struct btrfs_key *cpu_key, u32 *data_size,
3243 struct extent_buffer *leaf;
3244 struct btrfs_item *item;
3251 unsigned int data_end;
3252 struct btrfs_disk_key disk_key;
3253 struct btrfs_key found_key;
3255 for (i = 0; i < nr; i++) {
3256 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3257 BTRFS_LEAF_DATA_SIZE(root)) {
3261 total_data += data_size[i];
3262 total_size += data_size[i] + sizeof(struct btrfs_item);
3266 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3272 leaf = path->nodes[0];
3274 nritems = btrfs_header_nritems(leaf);
3275 data_end = leaf_data_end(root, leaf);
3277 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3278 for (i = nr; i >= 0; i--) {
3279 total_data -= data_size[i];
3280 total_size -= data_size[i] + sizeof(struct btrfs_item);
3281 if (total_size < btrfs_leaf_free_space(root, leaf))
3287 slot = path->slots[0];
3290 if (slot != nritems) {
3291 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3293 item = btrfs_item_nr(leaf, slot);
3294 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3296 /* figure out how many keys we can insert in here */
3297 total_data = data_size[0];
3298 for (i = 1; i < nr; i++) {
3299 if (comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3301 total_data += data_size[i];
3305 if (old_data < data_end) {
3306 btrfs_print_leaf(root, leaf);
3307 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3308 slot, old_data, data_end);
3312 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3314 /* first correct the data pointers */
3315 WARN_ON(leaf->map_token);
3316 for (i = slot; i < nritems; i++) {
3319 item = btrfs_item_nr(leaf, i);
3320 if (!leaf->map_token) {
3321 map_extent_buffer(leaf, (unsigned long)item,
3322 sizeof(struct btrfs_item),
3323 &leaf->map_token, &leaf->kaddr,
3324 &leaf->map_start, &leaf->map_len,
3328 ioff = btrfs_item_offset(leaf, item);
3329 btrfs_set_item_offset(leaf, item, ioff - total_data);
3331 if (leaf->map_token) {
3332 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3333 leaf->map_token = NULL;
3336 /* shift the items */
3337 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3338 btrfs_item_nr_offset(slot),
3339 (nritems - slot) * sizeof(struct btrfs_item));
3341 /* shift the data */
3342 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3343 data_end - total_data, btrfs_leaf_data(leaf) +
3344 data_end, old_data - data_end);
3345 data_end = old_data;
3348 * this sucks but it has to be done: if we are inserting at
3349 * the end of the leaf, only insert 1 of the items, since we
3350 * have no way of knowing what's on the next leaf and we'd have
3351 * to drop our current locks to figure it out
3356 /* setup the item for the new data */
3357 for (i = 0; i < nr; i++) {
3358 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3359 btrfs_set_item_key(leaf, &disk_key, slot + i);
3360 item = btrfs_item_nr(leaf, slot + i);
3361 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3362 data_end -= data_size[i];
3363 btrfs_set_item_size(leaf, item, data_size[i]);
3365 btrfs_set_header_nritems(leaf, nritems + nr);
3366 btrfs_mark_buffer_dirty(leaf);
3370 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3371 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3374 if (btrfs_leaf_free_space(root, leaf) < 0) {
3375 btrfs_print_leaf(root, leaf);
3385 * Given a key and some data, insert items into the tree.
3386 * This does all the path init required, making room in the tree if needed.
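/*
 * Editor's sketch (not part of the original source): the usual pattern is
 * to reserve the space first and then fill in the payload, as
 * btrfs_insert_item() below does for the single item case:
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, nr);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *		write_extent_buffer(leaf, data, ptr, sizes[0]);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *
 * keys, sizes, data and nr here are hypothetical caller variables.
 */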
3388 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3389 struct btrfs_root *root,
3390 struct btrfs_path *path,
3391 struct btrfs_key *cpu_key, u32 *data_size,
3394 struct extent_buffer *leaf;
3395 struct btrfs_item *item;
3403 unsigned int data_end;
3404 struct btrfs_disk_key disk_key;
3406 for (i = 0; i < nr; i++)
3407 total_data += data_size[i];
3409 total_size = total_data + (nr * sizeof(struct btrfs_item));
3410 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3416 slot_orig = path->slots[0];
3417 leaf = path->nodes[0];
3419 nritems = btrfs_header_nritems(leaf);
3420 data_end = leaf_data_end(root, leaf);
3422 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3423 btrfs_print_leaf(root, leaf);
3424 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3425 total_size, btrfs_leaf_free_space(root, leaf));
3429 slot = path->slots[0];
3432 if (slot != nritems) {
3433 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3435 if (old_data < data_end) {
3436 btrfs_print_leaf(root, leaf);
3437 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3438 slot, old_data, data_end);
3442 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3444 /* first correct the data pointers */
3445 WARN_ON(leaf->map_token);
3446 for (i = slot; i < nritems; i++) {
3449 item = btrfs_item_nr(leaf, i);
3450 if (!leaf->map_token) {
3451 map_extent_buffer(leaf, (unsigned long)item,
3452 sizeof(struct btrfs_item),
3453 &leaf->map_token, &leaf->kaddr,
3454 &leaf->map_start, &leaf->map_len,
3458 ioff = btrfs_item_offset(leaf, item);
3459 btrfs_set_item_offset(leaf, item, ioff - total_data);
3461 if (leaf->map_token) {
3462 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3463 leaf->map_token = NULL;
3466 /* shift the items */
3467 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3468 btrfs_item_nr_offset(slot),
3469 (nritems - slot) * sizeof(struct btrfs_item));
3471 /* shift the data */
3472 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3473 data_end - total_data, btrfs_leaf_data(leaf) +
3474 data_end, old_data - data_end);
3475 data_end = old_data;
3478 /* setup the item for the new data */
3479 for (i = 0; i < nr; i++) {
3480 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3481 btrfs_set_item_key(leaf, &disk_key, slot + i);
3482 item = btrfs_item_nr(leaf, slot + i);
3483 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3484 data_end -= data_size[i];
3485 btrfs_set_item_size(leaf, item, data_size[i]);
3487 btrfs_set_header_nritems(leaf, nritems + nr);
3488 btrfs_mark_buffer_dirty(leaf);
3492 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3493 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3496 if (btrfs_leaf_free_space(root, leaf) < 0) {
3497 btrfs_print_leaf(root, leaf);
3501 btrfs_unlock_up_safe(path, 1);
3506 * Given a key and some data, insert an item into the tree.
3507 * This does all the path init required, making room in the tree if needed.
3509 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3510 *root, struct btrfs_key *cpu_key, void *data, u32
3514 struct btrfs_path *path;
3515 struct extent_buffer *leaf;
3518 path = btrfs_alloc_path();
3520 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3522 leaf = path->nodes[0];
3523 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3524 write_extent_buffer(leaf, data, ptr, data_size);
3525 btrfs_mark_buffer_dirty(leaf);
3527 btrfs_free_path(path);
3532 * delete the pointer from a given node.
3534 * the tree should have been previously balanced so the deletion does not empty a node.
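/*
 * Editor's sketch (not part of the original source), with hypothetical
 * numbers: deleting slot 2 from a node holding 5 key pointers moves the
 * pointers in slots 3 and 4 down one position and drops nritems to 4.
 * Deleting slot 0 instead would also push the node's new first key up
 * into the parent via fixup_low_keys().
 */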
3537 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3538 struct btrfs_path *path, int level, int slot)
3540 struct extent_buffer *parent = path->nodes[level];
3545 nritems = btrfs_header_nritems(parent);
3546 if (slot != nritems - 1) {
3547 memmove_extent_buffer(parent,
3548 btrfs_node_key_ptr_offset(slot),
3549 btrfs_node_key_ptr_offset(slot + 1),
3550 sizeof(struct btrfs_key_ptr) *
3551 (nritems - slot - 1));
3554 btrfs_set_header_nritems(parent, nritems);
3555 if (nritems == 0 && parent == root->node) {
3556 BUG_ON(btrfs_header_level(root->node) != 1);
3557 /* just turn the root into a leaf and break */
3558 btrfs_set_header_level(root->node, 0);
3559 } else if (slot == 0) {
3560 struct btrfs_disk_key disk_key;
3562 btrfs_node_key(parent, &disk_key, 0);
3563 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3567 btrfs_mark_buffer_dirty(parent);
3572 * a helper function to delete the leaf pointed to by path->slots[1] and
3573 * path->nodes[1]. bytenr is the node block pointer, but since the callers
3574 * already know it, it is faster to have them pass it down than to
3575 * read it out of the node again.
3577 * This deletes the pointer in path->nodes[1] and frees the leaf
3578 * block extent. zero is returned if it all worked out, < 0 otherwise.
3580 * The path must have already been setup for deleting the leaf, including
3581 * all the proper balancing. path->nodes[1] must be locked.
3583 noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3584 struct btrfs_root *root,
3585 struct btrfs_path *path, u64 bytenr)
3588 u64 root_gen = btrfs_header_generation(path->nodes[1]);
3589 u64 parent_start = path->nodes[1]->start;
3590 u64 parent_owner = btrfs_header_owner(path->nodes[1]);
3592 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3597 * btrfs_free_extent is expensive, we want to make sure we
3598 * aren't holding any locks when we call it
3600 btrfs_unlock_up_safe(path, 0);
3602 ret = btrfs_free_extent(trans, root, bytenr,
3603 btrfs_level_size(root, 0),
3604 parent_start, parent_owner,
3609 * delete 'nr' items starting at 'slot' at the leaf level in path. If that
3610 * empties the leaf, remove it from the tree
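/*
 * Editor's sketch (not part of the original source): deleting a single item
 * found by an earlier search might look like
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *
 * where the -1 ins_len asks btrfs_search_slot() to balance nodes on the
 * way down in preparation for a delete; key and path are hypothetical
 * caller variables.
 */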
3612 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3613 struct btrfs_path *path, int slot, int nr)
3615 struct extent_buffer *leaf;
3616 struct btrfs_item *item;
3624 leaf = path->nodes[0];
3625 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3627 for (i = 0; i < nr; i++)
3628 dsize += btrfs_item_size_nr(leaf, slot + i);
3630 nritems = btrfs_header_nritems(leaf);
3632 if (slot + nr != nritems) {
3633 int data_end = leaf_data_end(root, leaf);
3635 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3637 btrfs_leaf_data(leaf) + data_end,
3638 last_off - data_end);
3640 for (i = slot + nr; i < nritems; i++) {
3643 item = btrfs_item_nr(leaf, i);
3644 if (!leaf->map_token) {
3645 map_extent_buffer(leaf, (unsigned long)item,
3646 sizeof(struct btrfs_item),
3647 &leaf->map_token, &leaf->kaddr,
3648 &leaf->map_start, &leaf->map_len,
3651 ioff = btrfs_item_offset(leaf, item);
3652 btrfs_set_item_offset(leaf, item, ioff + dsize);
3655 if (leaf->map_token) {
3656 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3657 leaf->map_token = NULL;
3660 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3661 btrfs_item_nr_offset(slot + nr),
3662 sizeof(struct btrfs_item) *
3663 (nritems - slot - nr));
3665 btrfs_set_header_nritems(leaf, nritems - nr);
3668 /* delete the leaf if we've emptied it */
3670 if (leaf == root->node) {
3671 btrfs_set_header_level(leaf, 0);
3673 ret = btrfs_del_leaf(trans, root, path, leaf->start);
3677 int used = leaf_space_used(leaf, 0, nritems);
3679 struct btrfs_disk_key disk_key;
3681 btrfs_item_key(leaf, &disk_key, 0);
3682 wret = fixup_low_keys(trans, root, path,
3688 /* delete the leaf if it is mostly empty */
3689 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
3690 /* push_leaf_left fixes the path.
3691 * make sure the path still points to our leaf
3692 * for possible call to del_ptr below
3694 slot = path->slots[1];
3695 extent_buffer_get(leaf);
3697 wret = push_leaf_left(trans, root, path, 1, 1);
3698 if (wret < 0 && wret != -ENOSPC)
3701 if (path->nodes[0] == leaf &&
3702 btrfs_header_nritems(leaf)) {
3703 wret = push_leaf_right(trans, root, path, 1, 1);
3704 if (wret < 0 && wret != -ENOSPC)
3708 if (btrfs_header_nritems(leaf) == 0) {
3709 path->slots[1] = slot;
3710 ret = btrfs_del_leaf(trans, root, path,
3713 free_extent_buffer(leaf);
3715 /* if we're still in the path, make sure
3716 * we're dirty. Otherwise, one of the
3717 * push_leaf functions must have already
3718 * dirtied this buffer
3720 if (path->nodes[0] == leaf)
3721 btrfs_mark_buffer_dirty(leaf);
3722 free_extent_buffer(leaf);
3725 btrfs_mark_buffer_dirty(leaf);
3732 * search the tree again to find a leaf with lesser keys
3733 * returns 0 if it found something or 1 if there are no lesser leaves.
3734 * returns < 0 on io errors.
3736 * This may release the path, and so you may lose any locks held at the time you call it.
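/*
 * Editor's sketch (not part of the original source): the "lesser" search
 * key is the first key of the current leaf made strictly smaller -- offset
 * is decremented when it is nonzero, otherwise type, otherwise objectid.
 * If all three fields are already zero there can be no lesser leaf and 1
 * is returned; otherwise the path is released and the tree is searched
 * again with the smaller key.
 */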
3739 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3741 struct btrfs_key key;
3742 struct btrfs_disk_key found_key;
3745 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3749 else if (key.type > 0)
3751 else if (key.objectid > 0)
3756 btrfs_release_path(root, path);
3757 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3760 btrfs_item_key(path->nodes[0], &found_key, 0);
3761 ret = comp_keys(&found_key, &key);
3768 * A helper function to walk down the tree starting at min_key, and looking
3769 * for nodes or leaves that are either in cache or have a minimum
3770 * transaction id. This is used by the btree defrag code, and tree logging
3772 * This does not cow, but it does stuff the starting key it finds back
3773 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3774 * key and get a writable path.
3776 * This does lock as it descends, and path->keep_locks should be set
3777 * to 1 by the caller.
3779 * This honors path->lowest_level to prevent descent past a given level of the tree.
3782 * min_trans indicates the oldest transaction that you are interested
3783 * in walking through. Any nodes or leaves older than min_trans are
3784 * skipped over (without reading them).
3786 * returns zero if something useful was found, < 0 on error and 1 if there
3787 * was nothing in the tree that matched the search criteria.
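/*
 * Editor's sketch (not part of the original source): a hypothetical scan of
 * everything newer than min_trans could be driven roughly like
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key, path,
 *					   0, min_trans);
 *		if (ret)
 *			break;
 *		(process whatever min_key now describes)
 *		btrfs_release_path(root, path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 *	path->keep_locks = 0;
 *
 * min_key, max_key and the key advance are caller specific; defrag and
 * tree-log each step the key forward in their own way.
 */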
3789 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3790 struct btrfs_key *max_key,
3791 struct btrfs_path *path, int cache_only,
3794 struct extent_buffer *cur;
3795 struct btrfs_key found_key;
3802 WARN_ON(!path->keep_locks);
3804 cur = btrfs_lock_root_node(root);
3805 level = btrfs_header_level(cur);
3806 WARN_ON(path->nodes[level]);
3807 path->nodes[level] = cur;
3808 path->locks[level] = 1;
3810 if (btrfs_header_generation(cur) < min_trans) {
3815 nritems = btrfs_header_nritems(cur);
3816 level = btrfs_header_level(cur);
3817 sret = bin_search(cur, min_key, level, &slot);
3819 /* at the lowest level, we're done, setup the path and exit */
3820 if (level == path->lowest_level) {
3821 if (slot >= nritems)
3824 path->slots[level] = slot;
3825 btrfs_item_key_to_cpu(cur, &found_key, slot);
3828 if (sret && slot > 0)
3831 * check this node pointer against the cache_only and
3832 * min_trans parameters. If it isn't in cache or is too
3833 * old, skip to the next one.
3835 while (slot < nritems) {
3838 struct extent_buffer *tmp;
3839 struct btrfs_disk_key disk_key;
3841 blockptr = btrfs_node_blockptr(cur, slot);
3842 gen = btrfs_node_ptr_generation(cur, slot);
3843 if (gen < min_trans) {
3851 btrfs_node_key(cur, &disk_key, slot);
3852 if (comp_keys(&disk_key, max_key) >= 0) {
3858 tmp = btrfs_find_tree_block(root, blockptr,
3859 btrfs_level_size(root, level - 1));
3861 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
3862 free_extent_buffer(tmp);
3866 free_extent_buffer(tmp);
3871 * we didn't find a candidate key in this node, walk forward
3872 * and find another one
3874 if (slot >= nritems) {
3875 path->slots[level] = slot;
3876 btrfs_set_path_blocking(path);
3877 sret = btrfs_find_next_key(root, path, min_key, level,
3878 cache_only, min_trans);
3880 btrfs_release_path(root, path);
3886 /* save our key for returning back */
3887 btrfs_node_key_to_cpu(cur, &found_key, slot);
3888 path->slots[level] = slot;
3889 if (level == path->lowest_level) {
3891 unlock_up(path, level, 1);
3894 btrfs_set_path_blocking(path);
3895 cur = read_node_slot(root, cur, slot);
3897 btrfs_tree_lock(cur);
3899 path->locks[level - 1] = 1;
3900 path->nodes[level - 1] = cur;
3901 unlock_up(path, level, 1);
3902 btrfs_clear_path_blocking(path, NULL);
3906 memcpy(min_key, &found_key, sizeof(found_key));
3907 btrfs_set_path_blocking(path);
3912 * this is similar to btrfs_next_leaf, but does not try to preserve
3913 * and fixup the path. It looks for and returns the next key in the
3914 * tree based on the current path and the cache_only and min_trans parameters.
3917 * 0 is returned if another key is found, < 0 if there are any errors
3918 * and 1 is returned if there are no higher keys in the tree
3920 * path->keep_locks should be set to 1 on the search made before
3921 * calling this function.
3923 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
3924 struct btrfs_key *key, int lowest_level,
3925 int cache_only, u64 min_trans)
3927 int level = lowest_level;
3929 struct extent_buffer *c;
3931 WARN_ON(!path->keep_locks);
3932 while (level < BTRFS_MAX_LEVEL) {
3933 if (!path->nodes[level])
3936 slot = path->slots[level] + 1;
3937 c = path->nodes[level];
3939 if (slot >= btrfs_header_nritems(c)) {
3941 if (level == BTRFS_MAX_LEVEL)
3946 btrfs_item_key_to_cpu(c, key, slot);
3948 u64 blockptr = btrfs_node_blockptr(c, slot);
3949 u64 gen = btrfs_node_ptr_generation(c, slot);
3952 struct extent_buffer *cur;
3953 cur = btrfs_find_tree_block(root, blockptr,
3954 btrfs_level_size(root, level - 1));
3955 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
3958 free_extent_buffer(cur);
3961 free_extent_buffer(cur);
3963 if (gen < min_trans) {
3967 btrfs_node_key_to_cpu(c, key, slot);
3975 * search the tree again to find a leaf with greater keys
3976 * returns 0 if it found something or 1 if there are no greater leaves.
3977 * returns < 0 on io errors.
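/*
 * Editor's sketch (not part of the original source): the common way to
 * walk every item starting from a key, with hypothetical caller variables,
 * is
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 *		(use the item, then)
 *		path->slots[0]++;
 *	}
 *
 * A return of 1 from btrfs_next_leaf() simply means the walk is done.
 */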
3979 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
3983 struct extent_buffer *c;
3984 struct extent_buffer *next = NULL;
3985 struct btrfs_key key;
3989 nritems = btrfs_header_nritems(path->nodes[0]);
3993 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
3995 btrfs_release_path(root, path);
3996 path->keep_locks = 1;
3997 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3998 path->keep_locks = 0;
4003 btrfs_set_path_blocking(path);
4004 nritems = btrfs_header_nritems(path->nodes[0]);
4006 * by releasing the path above we dropped all our locks. A balance
4007 * could have added more items next to the key that used to be
4008 * at the very end of the block. So, check again here and
4009 * advance the path if there are now more items available.
4011 if (nritems > 0 && path->slots[0] < nritems - 1) {
4016 while (level < BTRFS_MAX_LEVEL) {
4017 if (!path->nodes[level])
4020 slot = path->slots[level] + 1;
4021 c = path->nodes[level];
4022 if (slot >= btrfs_header_nritems(c)) {
4024 if (level == BTRFS_MAX_LEVEL)
4030 btrfs_tree_unlock(next);
4031 free_extent_buffer(next);
4034 /* the path was set to blocking above */
4035 if (level == 1 && (path->locks[1] || path->skip_locking) &&
4037 reada_for_search(root, path, level, slot, 0);
4039 next = read_node_slot(root, c, slot);
4040 if (!path->skip_locking) {
4041 btrfs_assert_tree_locked(c);
4042 btrfs_tree_lock(next);
4043 btrfs_set_lock_blocking(next);
4047 path->slots[level] = slot;
4050 c = path->nodes[level];
4051 if (path->locks[level])
4052 btrfs_tree_unlock(c);
4053 free_extent_buffer(c);
4054 path->nodes[level] = next;
4055 path->slots[level] = 0;
4056 if (!path->skip_locking)
4057 path->locks[level] = 1;
4061 btrfs_set_path_blocking(path);
4062 if (level == 1 && path->locks[1] && path->reada)
4063 reada_for_search(root, path, level, slot, 0);
4064 next = read_node_slot(root, next, 0);
4065 if (!path->skip_locking) {
4066 btrfs_assert_tree_locked(path->nodes[level]);
4067 btrfs_tree_lock(next);
4068 btrfs_set_lock_blocking(next);
4072 unlock_up(path, 0, 1);
4077 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4078 * searching until it gets past min_objectid or finds an item of 'type'
4080 * returns 0 if something is found, 1 if nothing was found and < 0 on error
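/*
 * Editor's sketch (not part of the original source): after positioning the
 * path somewhere at or past an inode's extent items, a hypothetical caller
 * could step back to the closest preceding BTRFS_EXTENT_DATA_KEY item with
 *
 *	ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 *
 * where ino is the smallest objectid the caller still cares about.
 */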
4082 int btrfs_previous_item(struct btrfs_root *root,
4083 struct btrfs_path *path, u64 min_objectid,
4086 struct btrfs_key found_key;
4087 struct extent_buffer *leaf;
4092 if (path->slots[0] == 0) {
4093 btrfs_set_path_blocking(path);
4094 ret = btrfs_prev_leaf(root, path);
4100 leaf = path->nodes[0];
4101 nritems = btrfs_header_nritems(leaf);
4104 if (path->slots[0] == nritems)
4107 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4108 if (found_key.type == type)
4110 if (found_key.objectid < min_objectid)
4112 if (found_key.objectid == min_objectid &&
4113 found_key.type < type)